1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2020 Intel Corporation.
3  *   Copyright (c) 2022, 2023 NVIDIA CORPORATION & AFFILIATES.
4  *   All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "spdk/accel_module.h"
10 
11 #include "accel_internal.h"
12 
13 #include "spdk/dma.h"
14 #include "spdk/env.h"
15 #include "spdk/likely.h"
16 #include "spdk/log.h"
17 #include "spdk/thread.h"
18 #include "spdk/json.h"
19 #include "spdk/crc32.h"
20 #include "spdk/util.h"
21 #include "spdk/hexlify.h"
22 #include "spdk/string.h"
23 
24 /* Accelerator Framework: this file provides the top-level, generic
25  * API for the accelerator operations defined here. Modules, such as
26  * the one in /module/accel/ioat, supply the implementations, with
27  * the exception of the pure software implementation contained later
28  * in this file.
29  */
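
/* Illustrative usage sketch (not part of this file): a caller obtains a
 * channel and submits an operation with a completion callback;
 * spdk_accel_get_io_channel() is declared in spdk/accel.h.
 *
 *	static void
 *	copy_done(void *cb_arg, int status)
 *	{
 *		if (status != 0) {
 *			SPDK_ERRLOG("copy failed: %d\n", status);
 *		}
 *	}
 *
 *	struct spdk_io_channel *ch = spdk_accel_get_io_channel();
 *	int rc = spdk_accel_submit_copy(ch, dst, src, nbytes, copy_done, NULL);
 *	(a return of -ENOMEM means the per-channel task pool is empty; retry later)
 */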
30 
31 #define ALIGN_4K			0x1000
32 #define ACCEL_TASKS_PER_CHANNEL		2048
33 #define ACCEL_SMALL_CACHE_SIZE		128
34 #define ACCEL_LARGE_CACHE_SIZE		16
35 /* Set MSB, so we don't return NULL pointers as buffers */
36 #define ACCEL_BUFFER_BASE		((void *)(1ull << 63))
37 #define ACCEL_BUFFER_OFFSET_MASK	((uintptr_t)ACCEL_BUFFER_BASE - 1)
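
/* A worked example of this scheme (see spdk_accel_get_buf() below): every
 * accel buffer is handed out as the pointer ACCEL_BUFFER_BASE and identified
 * by its domain_ctx, so e.g. (char *)ACCEL_BUFFER_BASE + 0x1000 names offset
 * 0x1000 within that buffer, recoverable as
 * ((uintptr_t)ptr & ACCEL_BUFFER_OFFSET_MASK). */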
38 
39 #define ACCEL_CRYPTO_TWEAK_MODE_DEFAULT	SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA
40 #define ACCEL_TASKS_IN_SEQUENCE_LIMIT	8
41 
42 struct accel_module {
43 	struct spdk_accel_module_if	*module;
44 	bool				supports_memory_domains;
45 };
46 
47 /* Largest context size for all accel modules */
48 static size_t g_max_accel_module_size = sizeof(struct spdk_accel_task);
49 
50 static struct spdk_accel_module_if *g_accel_module = NULL;
51 static spdk_accel_fini_cb g_fini_cb_fn = NULL;
52 static void *g_fini_cb_arg = NULL;
53 static bool g_modules_started = false;
54 static struct spdk_memory_domain *g_accel_domain;
55 
56 /* Global list of registered accelerator modules */
57 static TAILQ_HEAD(, spdk_accel_module_if) spdk_accel_module_list =
58 	TAILQ_HEAD_INITIALIZER(spdk_accel_module_list);
59 
60 /* Crypto keyring */
61 static TAILQ_HEAD(, spdk_accel_crypto_key) g_keyring = TAILQ_HEAD_INITIALIZER(g_keyring);
62 static struct spdk_spinlock g_keyring_spin;
63 
64 /* Global array mapping capabilities to modules */
65 static struct accel_module g_modules_opc[SPDK_ACCEL_OPC_LAST] = {};
66 static char *g_modules_opc_override[SPDK_ACCEL_OPC_LAST] = {};
67 TAILQ_HEAD(, spdk_accel_driver) g_accel_drivers = TAILQ_HEAD_INITIALIZER(g_accel_drivers);
68 static struct spdk_accel_driver *g_accel_driver;
69 static struct spdk_accel_opts g_opts = {
70 	.small_cache_size = ACCEL_SMALL_CACHE_SIZE,
71 	.large_cache_size = ACCEL_LARGE_CACHE_SIZE,
72 	.task_count = ACCEL_TASKS_PER_CHANNEL,
73 	.sequence_count = ACCEL_TASKS_PER_CHANNEL,
74 	.buf_count = ACCEL_TASKS_PER_CHANNEL,
75 };
76 static struct accel_stats g_stats;
77 static struct spdk_spinlock g_stats_lock;
78 
79 static const char *g_opcode_strings[SPDK_ACCEL_OPC_LAST] = {
80 	"copy", "fill", "dualcast", "compare", "crc32c", "copy_crc32c",
81 	"compress", "decompress", "encrypt", "decrypt", "xor",
82 	"dif_verify", "dif_verify_copy", "dif_generate", "dif_generate_copy",
83 	"dix_generate", "dix_verify"
84 };
85 
86 enum accel_sequence_state {
87 	ACCEL_SEQUENCE_STATE_INIT,
88 	ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF,
89 	ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF,
90 	ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF,
91 	ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF,
92 	ACCEL_SEQUENCE_STATE_PULL_DATA,
93 	ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA,
94 	ACCEL_SEQUENCE_STATE_EXEC_TASK,
95 	ACCEL_SEQUENCE_STATE_AWAIT_TASK,
96 	ACCEL_SEQUENCE_STATE_COMPLETE_TASK,
97 	ACCEL_SEQUENCE_STATE_NEXT_TASK,
98 	ACCEL_SEQUENCE_STATE_PUSH_DATA,
99 	ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA,
100 	ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS,
101 	ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS,
102 	ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS,
103 	ACCEL_SEQUENCE_STATE_ERROR,
104 	ACCEL_SEQUENCE_STATE_MAX,
105 };
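
/* A sketch of the intended flow, inferred from the names and ordering (the
 * state machine that drives these transitions lives later in this file):
 * each task passes through check-virtbuf/check-bouncebuf to resolve its
 * buffers, pull-data/push-data to move data through bounce buffers, and
 * exec-task/await-task/complete-task/next-task to execute; the driver-*
 * states are used when tasks are handed to a platform driver. */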
106 
107 static const char *g_seq_states[]
108 __attribute__((unused)) = {
109 	[ACCEL_SEQUENCE_STATE_INIT] = "init",
110 	[ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF] = "check-virtbuf",
111 	[ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF] = "await-virtbuf",
112 	[ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF] = "check-bouncebuf",
113 	[ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF] = "await-bouncebuf",
114 	[ACCEL_SEQUENCE_STATE_PULL_DATA] = "pull-data",
115 	[ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA] = "await-pull-data",
116 	[ACCEL_SEQUENCE_STATE_EXEC_TASK] = "exec-task",
117 	[ACCEL_SEQUENCE_STATE_AWAIT_TASK] = "await-task",
118 	[ACCEL_SEQUENCE_STATE_COMPLETE_TASK] = "complete-task",
119 	[ACCEL_SEQUENCE_STATE_NEXT_TASK] = "next-task",
120 	[ACCEL_SEQUENCE_STATE_PUSH_DATA] = "push-data",
121 	[ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA] = "await-push-data",
122 	[ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS] = "driver-exec-tasks",
123 	[ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS] = "driver-await-tasks",
124 	[ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS] = "driver-complete-tasks",
125 	[ACCEL_SEQUENCE_STATE_ERROR] = "error",
126 	[ACCEL_SEQUENCE_STATE_MAX] = "",
127 };
128 
129 #define ACCEL_SEQUENCE_STATE_STRING(s) \
130 	(((s) >= ACCEL_SEQUENCE_STATE_INIT && (s) < ACCEL_SEQUENCE_STATE_MAX) \
131 	 ? g_seq_states[s] : "unknown")
132 
133 struct accel_buffer {
134 	struct spdk_accel_sequence	*seq;
135 	void				*buf;
136 	uint64_t			len;
137 	struct spdk_iobuf_entry		iobuf;
138 	spdk_accel_sequence_get_buf_cb	cb_fn;
139 	void				*cb_ctx;
140 	SLIST_ENTRY(accel_buffer)	link;
141 	struct accel_io_channel		*ch;
142 };
143 
144 struct accel_io_channel {
145 	struct spdk_io_channel			*module_ch[SPDK_ACCEL_OPC_LAST];
146 	struct spdk_io_channel			*driver_channel;
147 	void					*task_pool_base;
148 	struct spdk_accel_sequence		*seq_pool_base;
149 	struct accel_buffer			*buf_pool_base;
150 	struct spdk_accel_task_aux_data		*task_aux_data_base;
151 	STAILQ_HEAD(, spdk_accel_task)		task_pool;
152 	SLIST_HEAD(, spdk_accel_task_aux_data)	task_aux_data_pool;
153 	SLIST_HEAD(, spdk_accel_sequence)	seq_pool;
154 	SLIST_HEAD(, accel_buffer)		buf_pool;
155 	struct spdk_iobuf_channel		iobuf;
156 	struct accel_stats			stats;
157 };
158 
159 TAILQ_HEAD(accel_sequence_tasks, spdk_accel_task);
160 
161 struct spdk_accel_sequence {
162 	struct accel_io_channel			*ch;
163 	struct accel_sequence_tasks		tasks;
164 	SLIST_HEAD(, accel_buffer)		bounce_bufs;
165 	int					status;
166 	/* state uses enum accel_sequence_state */
167 	uint8_t					state;
168 	bool					in_process_sequence;
169 	spdk_accel_completion_cb		cb_fn;
170 	void					*cb_arg;
171 	SLIST_ENTRY(spdk_accel_sequence)	link;
172 };
173 SPDK_STATIC_ASSERT(sizeof(struct spdk_accel_sequence) == 64, "invalid size");
174 
175 #define accel_update_stats(ch, event, v) \
176 	do { \
177 		(ch)->stats.event += (v); \
178 	} while (0)
179 
180 #define accel_update_task_stats(ch, task, event, v) \
181 	accel_update_stats(ch, operations[(task)->op_code].event, v)
182 
183 static inline void accel_sequence_task_cb(void *cb_arg, int status);
184 
185 static inline void
186 accel_sequence_set_state(struct spdk_accel_sequence *seq, enum accel_sequence_state state)
187 {
188 	SPDK_DEBUGLOG(accel, "seq=%p, setting state: %s -> %s\n", seq,
189 		      ACCEL_SEQUENCE_STATE_STRING(seq->state), ACCEL_SEQUENCE_STATE_STRING(state));
190 	assert(seq->state != ACCEL_SEQUENCE_STATE_ERROR || state == ACCEL_SEQUENCE_STATE_ERROR);
191 	seq->state = state;
192 }
193 
194 static void
195 accel_sequence_set_fail(struct spdk_accel_sequence *seq, int status)
196 {
197 	accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
198 	assert(status != 0);
199 	seq->status = status;
200 }
201 
202 int
203 spdk_accel_get_opc_module_name(enum spdk_accel_opcode opcode, const char **module_name)
204 {
205 	if (opcode >= SPDK_ACCEL_OPC_LAST) {
206 		/* invalid opcode */
207 		return -EINVAL;
208 	}
209 
210 	if (g_modules_opc[opcode].module) {
211 		*module_name = g_modules_opc[opcode].module->name;
212 	} else {
213 		return -ENOENT;
214 	}
215 
216 	return 0;
217 }
218 
219 void
220 _accel_for_each_module(struct module_info *info, _accel_for_each_module_fn fn)
221 {
222 	struct spdk_accel_module_if *accel_module;
223 	enum spdk_accel_opcode opcode;
224 	int j = 0;
225 
226 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
227 		for (opcode = 0; opcode < SPDK_ACCEL_OPC_LAST; opcode++) {
228 			if (accel_module->supports_opcode(opcode)) {
229 				info->ops[j] = opcode;
230 				j++;
231 			}
232 		}
233 		info->name = accel_module->name;
234 		info->num_ops = j;
235 		fn(info);
236 		j = 0;
237 	}
238 }
239 
240 const char *
241 spdk_accel_get_opcode_name(enum spdk_accel_opcode opcode)
242 {
243 	if (opcode < SPDK_ACCEL_OPC_LAST) {
244 		return g_opcode_strings[opcode];
245 	}
246 
247 	return NULL;
248 }
249 
250 int
251 spdk_accel_assign_opc(enum spdk_accel_opcode opcode, const char *name)
252 {
253 	char *copy;
254 
255 	if (g_modules_started == true) {
256 		/* we don't allow re-assignment once things have started */
257 		return -EINVAL;
258 	}
259 
260 	if (opcode >= SPDK_ACCEL_OPC_LAST) {
261 		/* invalid opcode */
262 		return -EINVAL;
263 	}
264 
265 	copy = strdup(name);
266 	if (copy == NULL) {
267 		return -ENOMEM;
268 	}
269 
270 	/* module selection will be validated after the framework starts. */
271 	free(g_modules_opc_override[opcode]);
272 	g_modules_opc_override[opcode] = copy;
273 
274 	return 0;
275 }
276 
277 static inline struct spdk_accel_task *
278 _get_task(struct accel_io_channel *accel_ch, spdk_accel_completion_cb cb_fn, void *cb_arg)
279 {
280 	struct spdk_accel_task *accel_task;
281 
282 	accel_task = STAILQ_FIRST(&accel_ch->task_pool);
283 	if (spdk_unlikely(accel_task == NULL)) {
284 		accel_update_stats(accel_ch, retry.task, 1);
285 		return NULL;
286 	}
287 
288 	accel_update_stats(accel_ch, task_outstanding, 1);
289 	STAILQ_REMOVE_HEAD(&accel_ch->task_pool, link);
290 	accel_task->link.stqe_next = NULL;
291 
292 	accel_task->cb_fn = cb_fn;
293 	accel_task->cb_arg = cb_arg;
294 	accel_task->accel_ch = accel_ch;
295 	accel_task->s.iovs = NULL;
296 	accel_task->d.iovs = NULL;
297 
298 	return accel_task;
299 }
300 
301 static void
302 _put_task(struct accel_io_channel *ch, struct spdk_accel_task *task)
303 {
304 	STAILQ_INSERT_HEAD(&ch->task_pool, task, link);
305 	accel_update_stats(ch, task_outstanding, -1);
306 }
307 
308 void
309 spdk_accel_task_complete(struct spdk_accel_task *accel_task, int status)
310 {
311 	struct accel_io_channel		*accel_ch = accel_task->accel_ch;
312 	spdk_accel_completion_cb	cb_fn;
313 	void				*cb_arg;
314 
315 	accel_update_task_stats(accel_ch, accel_task, executed, 1);
316 	accel_update_task_stats(accel_ch, accel_task, num_bytes, accel_task->nbytes);
317 	if (spdk_unlikely(status != 0)) {
318 		accel_update_task_stats(accel_ch, accel_task, failed, 1);
319 	}
320 
321 	if (accel_task->seq) {
322 		accel_sequence_task_cb(accel_task->seq, status);
323 		return;
324 	}
325 
326 	cb_fn = accel_task->cb_fn;
327 	cb_arg = accel_task->cb_arg;
328 
329 	if (accel_task->has_aux) {
330 		SLIST_INSERT_HEAD(&accel_ch->task_aux_data_pool, accel_task->aux, link);
331 		accel_task->aux = NULL;
332 		accel_task->has_aux = false;
333 	}
334 
335 	/* Return the accel_task to the pool before invoking the callback, so
336 	 * that the task pool is not exhausted when the user's callback (cb_fn)
337 	 * recursively allocates another accel_task.
338 	 */
339 	_put_task(accel_ch, accel_task);
340 
341 	cb_fn(cb_arg, status);
342 }
343 
344 static inline int
345 accel_submit_task(struct accel_io_channel *accel_ch, struct spdk_accel_task *task)
346 {
347 	struct spdk_io_channel *module_ch = accel_ch->module_ch[task->op_code];
348 	struct spdk_accel_module_if *module = g_modules_opc[task->op_code].module;
349 	int rc;
350 
351 	rc = module->submit_tasks(module_ch, task);
352 	if (spdk_unlikely(rc != 0)) {
353 		accel_update_task_stats(accel_ch, task, failed, 1);
354 	}
355 
356 	return rc;
357 }
358 
359 static inline uint64_t
360 accel_get_iovlen(struct iovec *iovs, uint32_t iovcnt)
361 {
362 	uint64_t result = 0;
363 	uint32_t i;
364 
365 	for (i = 0; i < iovcnt; ++i) {
366 		result += iovs[i].iov_len;
367 	}
368 
369 	return result;
370 }
371 
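/* Note: on allocation failure this macro logs, returns the task to the pool,
 * and then returns -ENOMEM from the *calling* function (note the embedded
 * return statement). */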
372 #define ACCEL_TASK_ALLOC_AUX_BUF(task)						\
373 do {										\
374         (task)->aux = SLIST_FIRST(&(task)->accel_ch->task_aux_data_pool);	\
375         if (spdk_unlikely(!(task)->aux)) {					\
376                 SPDK_ERRLOG("Fatal problem, aux data was not allocated\n");	\
377                 _put_task((task)->accel_ch, (task));			\
378                 assert(0);							\
379                 return -ENOMEM;							\
380         }									\
381         SLIST_REMOVE_HEAD(&(task)->accel_ch->task_aux_data_pool, link);		\
382         (task)->has_aux = true;							\
383 } while (0)
384 
385 /* Accel framework public API for copy function */
386 int
387 spdk_accel_submit_copy(struct spdk_io_channel *ch, void *dst, void *src,
388 		       uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
389 {
390 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
391 	struct spdk_accel_task *accel_task;
392 
393 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
394 	if (spdk_unlikely(accel_task == NULL)) {
395 		return -ENOMEM;
396 	}
397 
398 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
399 
400 	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
401 	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
402 	accel_task->d.iovs[0].iov_base = dst;
403 	accel_task->d.iovs[0].iov_len = nbytes;
404 	accel_task->d.iovcnt = 1;
405 	accel_task->s.iovs[0].iov_base = src;
406 	accel_task->s.iovs[0].iov_len = nbytes;
407 	accel_task->s.iovcnt = 1;
408 	accel_task->nbytes = nbytes;
409 	accel_task->op_code = SPDK_ACCEL_OPC_COPY;
410 	accel_task->src_domain = NULL;
411 	accel_task->dst_domain = NULL;
412 
413 	return accel_submit_task(accel_ch, accel_task);
414 }
415 
416 /* Accel framework public API for dual cast copy function */
417 int
418 spdk_accel_submit_dualcast(struct spdk_io_channel *ch, void *dst1,
419 			   void *dst2, void *src, uint64_t nbytes,
420 			   spdk_accel_completion_cb cb_fn, void *cb_arg)
421 {
422 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
423 	struct spdk_accel_task *accel_task;
424 
425 	if ((uintptr_t)dst1 & (ALIGN_4K - 1) || (uintptr_t)dst2 & (ALIGN_4K - 1)) {
426 		SPDK_ERRLOG("Dualcast requires 4K alignment on dst addresses\n");
427 		return -EINVAL;
428 	}
429 
430 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
431 	if (spdk_unlikely(accel_task == NULL)) {
432 		return -ENOMEM;
433 	}
434 
435 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
436 
437 	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
438 	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
439 	accel_task->d2.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST2];
440 	accel_task->d.iovs[0].iov_base = dst1;
441 	accel_task->d.iovs[0].iov_len = nbytes;
442 	accel_task->d.iovcnt = 1;
443 	accel_task->d2.iovs[0].iov_base = dst2;
444 	accel_task->d2.iovs[0].iov_len = nbytes;
445 	accel_task->d2.iovcnt = 1;
446 	accel_task->s.iovs[0].iov_base = src;
447 	accel_task->s.iovs[0].iov_len = nbytes;
448 	accel_task->s.iovcnt = 1;
449 	accel_task->nbytes = nbytes;
450 	accel_task->op_code = SPDK_ACCEL_OPC_DUALCAST;
451 	accel_task->src_domain = NULL;
452 	accel_task->dst_domain = NULL;
453 
454 	return accel_submit_task(accel_ch, accel_task);
455 }
456 
457 /* Accel framework public API for compare function */
458 
459 int
460 spdk_accel_submit_compare(struct spdk_io_channel *ch, void *src1,
461 			  void *src2, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
462 			  void *cb_arg)
463 {
464 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
465 	struct spdk_accel_task *accel_task;
466 
467 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
468 	if (spdk_unlikely(accel_task == NULL)) {
469 		return -ENOMEM;
470 	}
471 
472 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
473 
474 	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
475 	accel_task->s2.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC2];
476 	accel_task->s.iovs[0].iov_base = src1;
477 	accel_task->s.iovs[0].iov_len = nbytes;
478 	accel_task->s.iovcnt = 1;
479 	accel_task->s2.iovs[0].iov_base = src2;
480 	accel_task->s2.iovs[0].iov_len = nbytes;
481 	accel_task->s2.iovcnt = 1;
482 	accel_task->nbytes = nbytes;
483 	accel_task->op_code = SPDK_ACCEL_OPC_COMPARE;
484 	accel_task->src_domain = NULL;
485 	accel_task->dst_domain = NULL;
486 
487 	return accel_submit_task(accel_ch, accel_task);
488 }
489 
490 /* Accel framework public API for fill function */
491 int
492 spdk_accel_submit_fill(struct spdk_io_channel *ch, void *dst,
493 		       uint8_t fill, uint64_t nbytes,
494 		       spdk_accel_completion_cb cb_fn, void *cb_arg)
495 {
496 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
497 	struct spdk_accel_task *accel_task;
498 
499 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
500 	if (spdk_unlikely(accel_task == NULL)) {
501 		return -ENOMEM;
502 	}
503 
504 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
505 
506 	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
507 	accel_task->d.iovs[0].iov_base = dst;
508 	accel_task->d.iovs[0].iov_len = nbytes;
509 	accel_task->d.iovcnt = 1;
510 	accel_task->nbytes = nbytes;
511 	memset(&accel_task->fill_pattern, fill, sizeof(uint64_t));
512 	accel_task->op_code = SPDK_ACCEL_OPC_FILL;
513 	accel_task->src_domain = NULL;
514 	accel_task->dst_domain = NULL;
515 
516 	return accel_submit_task(accel_ch, accel_task);
517 }
518 
519 /* Accel framework public API for CRC-32C function */
520 int
521 spdk_accel_submit_crc32c(struct spdk_io_channel *ch, uint32_t *crc_dst,
522 			 void *src, uint32_t seed, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
523 			 void *cb_arg)
524 {
525 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
526 	struct spdk_accel_task *accel_task;
527 
528 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
529 	if (spdk_unlikely(accel_task == NULL)) {
530 		return -ENOMEM;
531 	}
532 
533 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
534 
535 	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
536 	accel_task->s.iovs[0].iov_base = src;
537 	accel_task->s.iovs[0].iov_len = nbytes;
538 	accel_task->s.iovcnt = 1;
539 	accel_task->nbytes = nbytes;
540 	accel_task->crc_dst = crc_dst;
541 	accel_task->seed = seed;
542 	accel_task->op_code = SPDK_ACCEL_OPC_CRC32C;
543 	accel_task->src_domain = NULL;
544 	accel_task->dst_domain = NULL;
545 
546 	return accel_submit_task(accel_ch, accel_task);
547 }
548 
549 /* Accel framework public API for chained CRC-32C function */
550 int
551 spdk_accel_submit_crc32cv(struct spdk_io_channel *ch, uint32_t *crc_dst,
552 			  struct iovec *iov, uint32_t iov_cnt, uint32_t seed,
553 			  spdk_accel_completion_cb cb_fn, void *cb_arg)
554 {
555 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
556 	struct spdk_accel_task *accel_task;
557 
558 	if (iov == NULL) {
559 		SPDK_ERRLOG("iov should not be NULL\n");
560 		return -EINVAL;
561 	}
562 
563 	if (!iov_cnt) {
564 		SPDK_ERRLOG("iovcnt should not be zero\n");
565 		return -EINVAL;
566 	}
567 
568 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
569 	if (spdk_unlikely(accel_task == NULL)) {
570 		SPDK_ERRLOG("no memory\n");
571 		assert(0);
572 		return -ENOMEM;
573 	}
574 
575 	accel_task->s.iovs = iov;
576 	accel_task->s.iovcnt = iov_cnt;
577 	accel_task->nbytes = accel_get_iovlen(iov, iov_cnt);
578 	accel_task->crc_dst = crc_dst;
579 	accel_task->seed = seed;
580 	accel_task->op_code = SPDK_ACCEL_OPC_CRC32C;
581 	accel_task->src_domain = NULL;
582 	accel_task->dst_domain = NULL;
583 
584 	return accel_submit_task(accel_ch, accel_task);
585 }
586 
587 /* Accel framework public API for copy with CRC-32C function */
588 int
589 spdk_accel_submit_copy_crc32c(struct spdk_io_channel *ch, void *dst,
590 			      void *src, uint32_t *crc_dst, uint32_t seed, uint64_t nbytes,
591 			      spdk_accel_completion_cb cb_fn, void *cb_arg)
592 {
593 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
594 	struct spdk_accel_task *accel_task;
595 
596 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
597 	if (spdk_unlikely(accel_task == NULL)) {
598 		return -ENOMEM;
599 	}
600 
601 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
602 
603 	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
604 	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
605 	accel_task->d.iovs[0].iov_base = dst;
606 	accel_task->d.iovs[0].iov_len = nbytes;
607 	accel_task->d.iovcnt = 1;
608 	accel_task->s.iovs[0].iov_base = src;
609 	accel_task->s.iovs[0].iov_len = nbytes;
610 	accel_task->s.iovcnt = 1;
611 	accel_task->nbytes = nbytes;
612 	accel_task->crc_dst = crc_dst;
613 	accel_task->seed = seed;
614 	accel_task->op_code = SPDK_ACCEL_OPC_COPY_CRC32C;
615 	accel_task->src_domain = NULL;
616 	accel_task->dst_domain = NULL;
617 
618 	return accel_submit_task(accel_ch, accel_task);
619 }
620 
621 /* Accel framework public API for chained copy + CRC-32C function */
622 int
623 spdk_accel_submit_copy_crc32cv(struct spdk_io_channel *ch, void *dst,
624 			       struct iovec *src_iovs, uint32_t iov_cnt, uint32_t *crc_dst,
625 			       uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg)
626 {
627 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
628 	struct spdk_accel_task *accel_task;
629 	uint64_t nbytes;
630 
631 	if (src_iovs == NULL) {
632 		SPDK_ERRLOG("iov should not be NULL\n");
633 		return -EINVAL;
634 	}
635 
636 	if (!iov_cnt) {
637 		SPDK_ERRLOG("iovcnt should not be zero\n");
638 		return -EINVAL;
639 	}
640 
641 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
642 	if (spdk_unlikely(accel_task == NULL)) {
643 		SPDK_ERRLOG("no memory\n");
644 		assert(0);
645 		return -ENOMEM;
646 	}
647 
648 	nbytes = accel_get_iovlen(src_iovs, iov_cnt);
649 
650 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
651 
652 	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
653 	accel_task->d.iovs[0].iov_base = dst;
654 	accel_task->d.iovs[0].iov_len = nbytes;
655 	accel_task->d.iovcnt = 1;
656 	accel_task->s.iovs = src_iovs;
657 	accel_task->s.iovcnt = iov_cnt;
658 	accel_task->nbytes = nbytes;
659 	accel_task->crc_dst = crc_dst;
660 	accel_task->seed = seed;
661 	accel_task->op_code = SPDK_ACCEL_OPC_COPY_CRC32C;
662 	accel_task->src_domain = NULL;
663 	accel_task->dst_domain = NULL;
664 
665 	return accel_submit_task(accel_ch, accel_task);
666 }
667 
668 int
669 spdk_accel_get_compress_level_range(enum spdk_accel_comp_algo comp_algo,
670 				    uint32_t *min_level, uint32_t *max_level)
671 {
672 	struct spdk_accel_module_if *module = g_modules_opc[SPDK_ACCEL_OPC_COMPRESS].module;
673 
674 	if (module->get_compress_level_range == NULL) {
675 		SPDK_ERRLOG("Module %s doesn't implement callback fn get_compress_level_range.\n", module->name);
676 		return -ENOTSUP;
677 	}
678 
679 	return module->get_compress_level_range(comp_algo, min_level, max_level);
680 }
681 
682 static int
683 _accel_check_comp_algo(enum spdk_accel_comp_algo comp_algo)
684 {
685 	struct spdk_accel_module_if *module = g_modules_opc[SPDK_ACCEL_OPC_COMPRESS].module;
686 
687 	if (!module->compress_supports_algo || !module->compress_supports_algo(comp_algo)) {
688 		SPDK_ERRLOG("Module %s doesn't support compression algo %d\n", module->name, comp_algo);
689 		return -ENOTSUP;
690 	}
691 
692 	return 0;
693 }
694 
695 int
696 spdk_accel_submit_compress_ext(struct spdk_io_channel *ch, void *dst, uint64_t nbytes,
697 			       struct iovec *src_iovs, size_t src_iovcnt,
698 			       enum spdk_accel_comp_algo comp_algo, uint32_t comp_level,
699 			       uint32_t *output_size, spdk_accel_completion_cb cb_fn, void *cb_arg)
700 {
701 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
702 	struct spdk_accel_task *accel_task;
703 	int rc;
704 
705 	rc = _accel_check_comp_algo(comp_algo);
706 	if (spdk_unlikely(rc != 0)) {
707 		return rc;
708 	}
709 
710 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
711 	if (spdk_unlikely(accel_task == NULL)) {
712 		return -ENOMEM;
713 	}
714 
715 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
716 
717 	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
718 	accel_task->d.iovs[0].iov_base = dst;
719 	accel_task->d.iovs[0].iov_len = nbytes;
720 	accel_task->d.iovcnt = 1;
721 	accel_task->output_size = output_size;
722 	accel_task->s.iovs = src_iovs;
723 	accel_task->s.iovcnt = src_iovcnt;
724 	accel_task->nbytes = nbytes;
725 	accel_task->op_code = SPDK_ACCEL_OPC_COMPRESS;
726 	accel_task->src_domain = NULL;
727 	accel_task->dst_domain = NULL;
728 	accel_task->comp.algo = comp_algo;
729 	accel_task->comp.level = comp_level;
730 
731 	return accel_submit_task(accel_ch, accel_task);
732 }
733 
734 int
735 spdk_accel_submit_decompress_ext(struct spdk_io_channel *ch, struct iovec *dst_iovs,
736 				 size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
737 				 enum spdk_accel_comp_algo decomp_algo, uint32_t *output_size,
738 				 spdk_accel_completion_cb cb_fn, void *cb_arg)
739 {
740 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
741 	struct spdk_accel_task *accel_task;
742 	int rc;
743 
744 	rc = _accel_check_comp_algo(decomp_algo);
745 	if (spdk_unlikely(rc != 0)) {
746 		return rc;
747 	}
748 
749 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
750 	if (spdk_unlikely(accel_task == NULL)) {
751 		return -ENOMEM;
752 	}
753 
754 	accel_task->output_size = output_size;
755 	accel_task->s.iovs = src_iovs;
756 	accel_task->s.iovcnt = src_iovcnt;
757 	accel_task->d.iovs = dst_iovs;
758 	accel_task->d.iovcnt = dst_iovcnt;
759 	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
760 	accel_task->op_code = SPDK_ACCEL_OPC_DECOMPRESS;
761 	accel_task->src_domain = NULL;
762 	accel_task->dst_domain = NULL;
763 	accel_task->comp.algo = decomp_algo;
764 
765 	return accel_submit_task(accel_ch, accel_task);
766 }
767 
768 int
769 spdk_accel_submit_compress(struct spdk_io_channel *ch, void *dst, uint64_t nbytes,
770 			   struct iovec *src_iovs, size_t src_iovcnt, uint32_t *output_size,
771 			   spdk_accel_completion_cb cb_fn, void *cb_arg)
772 {
773 	return spdk_accel_submit_compress_ext(ch, dst, nbytes, src_iovs, src_iovcnt,
774 					      SPDK_ACCEL_COMP_ALGO_DEFLATE, 1, output_size, cb_fn, cb_arg);
775 }
776 
777 int
778 spdk_accel_submit_decompress(struct spdk_io_channel *ch, struct iovec *dst_iovs,
779 			     size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
780 			     uint32_t *output_size, spdk_accel_completion_cb cb_fn,
781 			     void *cb_arg)
782 {
783 	return spdk_accel_submit_decompress_ext(ch, dst_iovs, dst_iovcnt, src_iovs, src_iovcnt,
784 						SPDK_ACCEL_COMP_ALGO_DEFLATE, output_size, cb_fn, cb_arg);
785 }
786 
787 int
788 spdk_accel_submit_encrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
789 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
790 			  struct iovec *src_iovs, uint32_t src_iovcnt,
791 			  uint64_t iv, uint32_t block_size,
792 			  spdk_accel_completion_cb cb_fn, void *cb_arg)
793 {
794 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
795 	struct spdk_accel_task *accel_task;
796 
797 	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
798 		return -EINVAL;
799 	}
800 
801 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
802 	if (spdk_unlikely(accel_task == NULL)) {
803 		return -ENOMEM;
804 	}
805 
806 	accel_task->crypto_key = key;
807 	accel_task->s.iovs = src_iovs;
808 	accel_task->s.iovcnt = src_iovcnt;
809 	accel_task->d.iovs = dst_iovs;
810 	accel_task->d.iovcnt = dst_iovcnt;
811 	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
812 	accel_task->iv = iv;
813 	accel_task->block_size = block_size;
814 	accel_task->op_code = SPDK_ACCEL_OPC_ENCRYPT;
815 	accel_task->src_domain = NULL;
816 	accel_task->dst_domain = NULL;
817 
818 	return accel_submit_task(accel_ch, accel_task);
819 }
820 
821 int
822 spdk_accel_submit_decrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
823 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
824 			  struct iovec *src_iovs, uint32_t src_iovcnt,
825 			  uint64_t iv, uint32_t block_size,
826 			  spdk_accel_completion_cb cb_fn, void *cb_arg)
827 {
828 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
829 	struct spdk_accel_task *accel_task;
830 
831 	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
832 		return -EINVAL;
833 	}
834 
835 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
836 	if (spdk_unlikely(accel_task == NULL)) {
837 		return -ENOMEM;
838 	}
839 
840 	accel_task->crypto_key = key;
841 	accel_task->s.iovs = src_iovs;
842 	accel_task->s.iovcnt = src_iovcnt;
843 	accel_task->d.iovs = dst_iovs;
844 	accel_task->d.iovcnt = dst_iovcnt;
845 	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
846 	accel_task->iv = iv;
847 	accel_task->block_size = block_size;
848 	accel_task->op_code = SPDK_ACCEL_OPC_DECRYPT;
849 	accel_task->src_domain = NULL;
850 	accel_task->dst_domain = NULL;
851 
852 	return accel_submit_task(accel_ch, accel_task);
853 }
854 
855 int
856 spdk_accel_submit_xor(struct spdk_io_channel *ch, void *dst, void **sources, uint32_t nsrcs,
857 		      uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
858 {
859 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
860 	struct spdk_accel_task *accel_task;
861 
862 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
863 	if (spdk_unlikely(accel_task == NULL)) {
864 		return -ENOMEM;
865 	}
866 
867 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
868 
869 	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
870 	accel_task->nsrcs.srcs = sources;
871 	accel_task->nsrcs.cnt = nsrcs;
872 	accel_task->d.iovs[0].iov_base = dst;
873 	accel_task->d.iovs[0].iov_len = nbytes;
874 	accel_task->d.iovcnt = 1;
875 	accel_task->nbytes = nbytes;
876 	accel_task->op_code = SPDK_ACCEL_OPC_XOR;
877 	accel_task->src_domain = NULL;
878 	accel_task->dst_domain = NULL;
879 
880 	return accel_submit_task(accel_ch, accel_task);
881 }
882 
883 int
884 spdk_accel_submit_dif_verify(struct spdk_io_channel *ch,
885 			     struct iovec *iovs, size_t iovcnt, uint32_t num_blocks,
886 			     const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err,
887 			     spdk_accel_completion_cb cb_fn, void *cb_arg)
888 {
889 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
890 	struct spdk_accel_task *accel_task;
891 
892 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
893 	if (accel_task == NULL) {
894 		return -ENOMEM;
895 	}
896 
897 	accel_task->s.iovs = iovs;
898 	accel_task->s.iovcnt = iovcnt;
899 	accel_task->dif.ctx = ctx;
900 	accel_task->dif.err = err;
901 	accel_task->dif.num_blocks = num_blocks;
902 	accel_task->nbytes = num_blocks * ctx->block_size;
903 	accel_task->op_code = SPDK_ACCEL_OPC_DIF_VERIFY;
904 	accel_task->src_domain = NULL;
905 	accel_task->dst_domain = NULL;
906 
907 	return accel_submit_task(accel_ch, accel_task);
908 }
909 
910 int
911 spdk_accel_submit_dif_generate(struct spdk_io_channel *ch,
912 			       struct iovec *iovs, size_t iovcnt, uint32_t num_blocks,
913 			       const struct spdk_dif_ctx *ctx,
914 			       spdk_accel_completion_cb cb_fn, void *cb_arg)
915 {
916 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
917 	struct spdk_accel_task *accel_task;
918 
919 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
920 	if (accel_task == NULL) {
921 		return -ENOMEM;
922 	}
923 
924 	accel_task->s.iovs = iovs;
925 	accel_task->s.iovcnt = iovcnt;
926 	accel_task->dif.ctx = ctx;
927 	accel_task->dif.num_blocks = num_blocks;
928 	accel_task->nbytes = num_blocks * ctx->block_size;
929 	accel_task->op_code = SPDK_ACCEL_OPC_DIF_GENERATE;
930 	accel_task->src_domain = NULL;
931 	accel_task->dst_domain = NULL;
932 
933 	return accel_submit_task(accel_ch, accel_task);
934 }
935 
936 int
937 spdk_accel_submit_dif_generate_copy(struct spdk_io_channel *ch, struct iovec *dst_iovs,
938 				    size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
939 				    uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
940 				    spdk_accel_completion_cb cb_fn, void *cb_arg)
941 {
942 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
943 	struct spdk_accel_task *accel_task;
944 
945 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
946 	if (accel_task == NULL) {
947 		return -ENOMEM;
948 	}
949 
950 	accel_task->s.iovs = src_iovs;
951 	accel_task->s.iovcnt = src_iovcnt;
952 	accel_task->d.iovs = dst_iovs;
953 	accel_task->d.iovcnt = dst_iovcnt;
954 	accel_task->dif.ctx = ctx;
955 	accel_task->dif.num_blocks = num_blocks;
956 	accel_task->nbytes = num_blocks * ctx->block_size;
957 	accel_task->op_code = SPDK_ACCEL_OPC_DIF_GENERATE_COPY;
958 	accel_task->src_domain = NULL;
959 	accel_task->dst_domain = NULL;
960 
961 	return accel_submit_task(accel_ch, accel_task);
962 }
963 
964 int
965 spdk_accel_submit_dif_verify_copy(struct spdk_io_channel *ch,
966 				  struct iovec *dst_iovs, size_t dst_iovcnt,
967 				  struct iovec *src_iovs, size_t src_iovcnt, uint32_t num_blocks,
968 				  const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err,
969 				  spdk_accel_completion_cb cb_fn, void *cb_arg)
970 {
971 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
972 	struct spdk_accel_task *accel_task;
973 
974 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
975 	if (accel_task == NULL) {
976 		return -ENOMEM;
977 	}
978 
979 	accel_task->s.iovs = src_iovs;
980 	accel_task->s.iovcnt = src_iovcnt;
981 	accel_task->d.iovs = dst_iovs;
982 	accel_task->d.iovcnt = dst_iovcnt;
983 	accel_task->dif.ctx = ctx;
984 	accel_task->dif.err = err;
985 	accel_task->dif.num_blocks = num_blocks;
986 	accel_task->nbytes = num_blocks * ctx->block_size;
987 	accel_task->op_code = SPDK_ACCEL_OPC_DIF_VERIFY_COPY;
988 	accel_task->src_domain = NULL;
989 	accel_task->dst_domain = NULL;
990 
991 	return accel_submit_task(accel_ch, accel_task);
992 }
993 
994 int
995 spdk_accel_submit_dix_generate(struct spdk_io_channel *ch, struct iovec *iovs,
996 			       size_t iovcnt, struct iovec *md_iov, uint32_t num_blocks,
997 			       const struct spdk_dif_ctx *ctx, spdk_accel_completion_cb cb_fn,
998 			       void *cb_arg)
999 {
1000 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1001 	struct spdk_accel_task *accel_task;
1002 
1003 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
1004 	if (accel_task == NULL) {
1005 		return -ENOMEM;
1006 	}
1007 
1008 	accel_task->s.iovs = iovs;
1009 	accel_task->s.iovcnt = iovcnt;
1010 	accel_task->d.iovs = md_iov;
1011 	accel_task->d.iovcnt = 1;
1012 	accel_task->dif.ctx = ctx;
1013 	accel_task->dif.num_blocks = num_blocks;
1014 	accel_task->nbytes = num_blocks * ctx->block_size;
1015 	accel_task->op_code = SPDK_ACCEL_OPC_DIX_GENERATE;
1016 	accel_task->src_domain = NULL;
1017 	accel_task->dst_domain = NULL;
1018 
1019 	return accel_submit_task(accel_ch, accel_task);
1020 }
1021 
1022 int
1023 spdk_accel_submit_dix_verify(struct spdk_io_channel *ch, struct iovec *iovs,
1024 			     size_t iovcnt, struct iovec *md_iov, uint32_t num_blocks,
1025 			     const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err,
1026 			     spdk_accel_completion_cb cb_fn, void *cb_arg)
1027 {
1028 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1029 	struct spdk_accel_task *accel_task;
1030 
1031 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
1032 	if (accel_task == NULL) {
1033 		return -ENOMEM;
1034 	}
1035 
1036 	accel_task->s.iovs = iovs;
1037 	accel_task->s.iovcnt = iovcnt;
1038 	accel_task->d.iovs = md_iov;
1039 	accel_task->d.iovcnt = 1;
1040 	accel_task->dif.ctx = ctx;
1041 	accel_task->dif.err = err;
1042 	accel_task->dif.num_blocks = num_blocks;
1043 	accel_task->nbytes = num_blocks * ctx->block_size;
1044 	accel_task->op_code = SPDK_ACCEL_OPC_DIX_VERIFY;
1045 	accel_task->src_domain = NULL;
1046 	accel_task->dst_domain = NULL;
1047 
1048 	return accel_submit_task(accel_ch, accel_task);
1049 }
1050 
1051 static inline struct accel_buffer *
1052 accel_get_buf(struct accel_io_channel *ch, uint64_t len)
1053 {
1054 	struct accel_buffer *buf;
1055 
1056 	buf = SLIST_FIRST(&ch->buf_pool);
1057 	if (spdk_unlikely(buf == NULL)) {
1058 		accel_update_stats(ch, retry.bufdesc, 1);
1059 		return NULL;
1060 	}
1061 
1062 	SLIST_REMOVE_HEAD(&ch->buf_pool, link);
1063 	buf->len = len;
1064 	buf->buf = NULL;
1065 	buf->seq = NULL;
1066 	buf->cb_fn = NULL;
1067 
1068 	return buf;
1069 }
1070 
1071 static inline void
1072 accel_put_buf(struct accel_io_channel *ch, struct accel_buffer *buf)
1073 {
1074 	if (buf->buf != NULL) {
1075 		spdk_iobuf_put(&ch->iobuf, buf->buf, buf->len);
1076 	}
1077 
1078 	SLIST_INSERT_HEAD(&ch->buf_pool, buf, link);
1079 }
1080 
1081 static inline struct spdk_accel_sequence *
1082 accel_sequence_get(struct accel_io_channel *ch)
1083 {
1084 	struct spdk_accel_sequence *seq;
1085 
1086 	assert(g_opts.task_count >= ch->stats.task_outstanding);
1087 
1088 	/* A sequence cannot be allocated if the number of available task objects cannot satisfy the
1089 	 * required limit. This prevents a potential deadlock where several requests are waiting for
1090 	 * task resources and none can make progress. This only works if at most a single async
1091 	 * operation (currently the io buffer allocation) happens after the sequence object is
1092 	 * obtained; if more async operations are added, this check must be improved. */
1093 	if (spdk_unlikely(g_opts.task_count - ch->stats.task_outstanding < ACCEL_TASKS_IN_SEQUENCE_LIMIT)) {
1094 		return NULL;
1095 	}
1096 
1097 	seq = SLIST_FIRST(&ch->seq_pool);
1098 	if (spdk_unlikely(seq == NULL)) {
1099 		accel_update_stats(ch, retry.sequence, 1);
1100 		return NULL;
1101 	}
1102 
1103 	accel_update_stats(ch, sequence_outstanding, 1);
1104 	SLIST_REMOVE_HEAD(&ch->seq_pool, link);
1105 
1106 	TAILQ_INIT(&seq->tasks);
1107 	SLIST_INIT(&seq->bounce_bufs);
1108 
1109 	seq->ch = ch;
1110 	seq->status = 0;
1111 	seq->state = ACCEL_SEQUENCE_STATE_INIT;
1112 	seq->in_process_sequence = false;
1113 
1114 	return seq;
1115 }
1116 
1117 static inline void
1118 accel_sequence_put(struct spdk_accel_sequence *seq)
1119 {
1120 	struct accel_io_channel *ch = seq->ch;
1121 	struct accel_buffer *buf;
1122 
1123 	while (!SLIST_EMPTY(&seq->bounce_bufs)) {
1124 		buf = SLIST_FIRST(&seq->bounce_bufs);
1125 		SLIST_REMOVE_HEAD(&seq->bounce_bufs, link);
1126 		accel_put_buf(seq->ch, buf);
1127 	}
1128 
1129 	assert(TAILQ_EMPTY(&seq->tasks));
1130 	seq->ch = NULL;
1131 
1132 	SLIST_INSERT_HEAD(&ch->seq_pool, seq, link);
1133 	accel_update_stats(ch, sequence_outstanding, -1);
1134 }
1135 
1136 static void accel_sequence_task_cb(void *cb_arg, int status);
1137 
1138 static inline struct spdk_accel_task *
1139 accel_sequence_get_task(struct accel_io_channel *ch, struct spdk_accel_sequence *seq,
1140 			spdk_accel_step_cb cb_fn, void *cb_arg)
1141 {
1142 	struct spdk_accel_task *task;
1143 
1144 	task = _get_task(ch, NULL, NULL);
1145 	if (spdk_unlikely(task == NULL)) {
1146 		return task;
1147 	}
1148 
1149 	task->step_cb_fn = cb_fn;
1150 	task->cb_arg = cb_arg;
1151 	task->seq = seq;
1152 
1153 	return task;
1154 }
1155 
1156 int
1157 spdk_accel_append_copy(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1158 		       struct iovec *dst_iovs, uint32_t dst_iovcnt,
1159 		       struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1160 		       struct iovec *src_iovs, uint32_t src_iovcnt,
1161 		       struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1162 		       spdk_accel_step_cb cb_fn, void *cb_arg)
1163 {
1164 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1165 	struct spdk_accel_task *task;
1166 	struct spdk_accel_sequence *seq = *pseq;
1167 
1168 	if (seq == NULL) {
1169 		seq = accel_sequence_get(accel_ch);
1170 		if (spdk_unlikely(seq == NULL)) {
1171 			return -ENOMEM;
1172 		}
1173 	}
1174 
1175 	assert(seq->ch == accel_ch);
1176 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1177 	if (spdk_unlikely(task == NULL)) {
1178 		if (*pseq == NULL) {
1179 			accel_sequence_put(seq);
1180 		}
1181 
1182 		return -ENOMEM;
1183 	}
1184 
1185 	task->dst_domain = dst_domain;
1186 	task->dst_domain_ctx = dst_domain_ctx;
1187 	task->d.iovs = dst_iovs;
1188 	task->d.iovcnt = dst_iovcnt;
1189 	task->src_domain = src_domain;
1190 	task->src_domain_ctx = src_domain_ctx;
1191 	task->s.iovs = src_iovs;
1192 	task->s.iovcnt = src_iovcnt;
1193 	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
1194 	task->op_code = SPDK_ACCEL_OPC_COPY;
1195 
1196 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1197 	*pseq = seq;
1198 
1199 	return 0;
1200 }
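
/* Illustrative chaining sketch (not part of this file; step_done/seq_done
 * are hypothetical caller callbacks, and spdk_accel_sequence_finish() is
 * declared in spdk/accel.h): append steps to a NULL-initialized sequence,
 * then execute it:
 *
 *	struct spdk_accel_sequence *seq = NULL;
 *	int rc;
 *
 *	rc = spdk_accel_append_fill(&seq, ch, buf, len, NULL, NULL, 0xff,
 *				    step_done, NULL);
 *	if (rc == 0) {
 *		rc = spdk_accel_append_copy(&seq, ch, dst_iovs, dst_iovcnt,
 *					    NULL, NULL, src_iovs, src_iovcnt,
 *					    NULL, NULL, step_done, NULL);
 *	}
 *	if (rc == 0) {
 *		spdk_accel_sequence_finish(seq, seq_done, NULL);
 *	}
 */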
1201 
1202 int
1203 spdk_accel_append_fill(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1204 		       void *buf, uint64_t len,
1205 		       struct spdk_memory_domain *domain, void *domain_ctx, uint8_t pattern,
1206 		       spdk_accel_step_cb cb_fn, void *cb_arg)
1207 {
1208 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1209 	struct spdk_accel_task *task;
1210 	struct spdk_accel_sequence *seq = *pseq;
1211 
1212 	if (seq == NULL) {
1213 		seq = accel_sequence_get(accel_ch);
1214 		if (spdk_unlikely(seq == NULL)) {
1215 			return -ENOMEM;
1216 		}
1217 	}
1218 
1219 	assert(seq->ch == accel_ch);
1220 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1221 	if (spdk_unlikely(task == NULL)) {
1222 		if (*pseq == NULL) {
1223 			accel_sequence_put(seq);
1224 		}
1225 
1226 		return -ENOMEM;
1227 	}
1228 
1229 	memset(&task->fill_pattern, pattern, sizeof(uint64_t));
1230 
1231 	task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
1232 	if (spdk_unlikely(!task->aux)) {
1233 		SPDK_ERRLOG("Fatal problem, aux data was not allocated\n");
1234 		if (*pseq == NULL) {
1235 			accel_sequence_put(seq);
1236 		}
1237 
1238 		task->seq = NULL;
1239 		_put_task(task->accel_ch, task);
1240 		assert(0);
1241 		return -ENOMEM;
1242 	}
1243 	SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
1244 	task->has_aux = true;
1245 
1246 	task->d.iovs = &task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
1247 	task->d.iovs[0].iov_base = buf;
1248 	task->d.iovs[0].iov_len = len;
1249 	task->d.iovcnt = 1;
1250 	task->nbytes = len;
1251 	task->src_domain = NULL;
1252 	task->dst_domain = domain;
1253 	task->dst_domain_ctx = domain_ctx;
1254 	task->op_code = SPDK_ACCEL_OPC_FILL;
1255 
1256 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1257 	*pseq = seq;
1258 
1259 	return 0;
1260 }
1261 
1262 int
1263 spdk_accel_append_decompress(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1264 			     struct iovec *dst_iovs, size_t dst_iovcnt,
1265 			     struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1266 			     struct iovec *src_iovs, size_t src_iovcnt,
1267 			     struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1268 			     spdk_accel_step_cb cb_fn, void *cb_arg)
1269 {
1270 	return spdk_accel_append_decompress_ext(pseq, ch, dst_iovs, dst_iovcnt, dst_domain,
1271 						dst_domain_ctx, src_iovs, src_iovcnt, src_domain,
1272 						src_domain_ctx, SPDK_ACCEL_COMP_ALGO_DEFLATE,
1273 						cb_fn, cb_arg);
1274 }
1275 
1276 int
1277 spdk_accel_append_decompress_ext(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1278 				 struct iovec *dst_iovs, size_t dst_iovcnt,
1279 				 struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1280 				 struct iovec *src_iovs, size_t src_iovcnt,
1281 				 struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1282 				 enum spdk_accel_comp_algo decomp_algo,
1283 				 spdk_accel_step_cb cb_fn, void *cb_arg)
1284 {
1285 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1286 	struct spdk_accel_task *task;
1287 	struct spdk_accel_sequence *seq = *pseq;
1288 	int rc;
1289 
1290 	rc = _accel_check_comp_algo(decomp_algo);
1291 	if (spdk_unlikely(rc != 0)) {
1292 		return rc;
1293 	}
1294 
1295 	if (seq == NULL) {
1296 		seq = accel_sequence_get(accel_ch);
1297 		if (spdk_unlikely(seq == NULL)) {
1298 			return -ENOMEM;
1299 		}
1300 	}
1301 
1302 	assert(seq->ch == accel_ch);
1303 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1304 	if (spdk_unlikely(task == NULL)) {
1305 		if (*pseq == NULL) {
1306 			accel_sequence_put(seq);
1307 		}
1308 
1309 		return -ENOMEM;
1310 	}
1311 
1312 	/* TODO: support output_size for chaining */
1313 	task->output_size = NULL;
1314 	task->dst_domain = dst_domain;
1315 	task->dst_domain_ctx = dst_domain_ctx;
1316 	task->d.iovs = dst_iovs;
1317 	task->d.iovcnt = dst_iovcnt;
1318 	task->src_domain = src_domain;
1319 	task->src_domain_ctx = src_domain_ctx;
1320 	task->s.iovs = src_iovs;
1321 	task->s.iovcnt = src_iovcnt;
1322 	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
1323 	task->op_code = SPDK_ACCEL_OPC_DECOMPRESS;
1324 	task->comp.algo = decomp_algo;
1325 
1326 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1327 	*pseq = seq;
1328 
1329 	return 0;
1330 }
1331 
1332 int
1333 spdk_accel_append_encrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1334 			  struct spdk_accel_crypto_key *key,
1335 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
1336 			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1337 			  struct iovec *src_iovs, uint32_t src_iovcnt,
1338 			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1339 			  uint64_t iv, uint32_t block_size,
1340 			  spdk_accel_step_cb cb_fn, void *cb_arg)
1341 {
1342 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1343 	struct spdk_accel_task *task;
1344 	struct spdk_accel_sequence *seq = *pseq;
1345 
1346 	assert(dst_iovs && dst_iovcnt && src_iovs && src_iovcnt && key && block_size);
1347 
1348 	if (seq == NULL) {
1349 		seq = accel_sequence_get(accel_ch);
1350 		if (spdk_unlikely(seq == NULL)) {
1351 			return -ENOMEM;
1352 		}
1353 	}
1354 
1355 	assert(seq->ch == accel_ch);
1356 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1357 	if (spdk_unlikely(task == NULL)) {
1358 		if (*pseq == NULL) {
1359 			accel_sequence_put(seq);
1360 		}
1361 
1362 		return -ENOMEM;
1363 	}
1364 
1365 	task->crypto_key = key;
1366 	task->src_domain = src_domain;
1367 	task->src_domain_ctx = src_domain_ctx;
1368 	task->s.iovs = src_iovs;
1369 	task->s.iovcnt = src_iovcnt;
1370 	task->dst_domain = dst_domain;
1371 	task->dst_domain_ctx = dst_domain_ctx;
1372 	task->d.iovs = dst_iovs;
1373 	task->d.iovcnt = dst_iovcnt;
1374 	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
1375 	task->iv = iv;
1376 	task->block_size = block_size;
1377 	task->op_code = SPDK_ACCEL_OPC_ENCRYPT;
1378 
1379 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1380 	*pseq = seq;
1381 
1382 	return 0;
1383 }
1384 
1385 int
1386 spdk_accel_append_decrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1387 			  struct spdk_accel_crypto_key *key,
1388 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
1389 			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1390 			  struct iovec *src_iovs, uint32_t src_iovcnt,
1391 			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1392 			  uint64_t iv, uint32_t block_size,
1393 			  spdk_accel_step_cb cb_fn, void *cb_arg)
1394 {
1395 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1396 	struct spdk_accel_task *task;
1397 	struct spdk_accel_sequence *seq = *pseq;
1398 
1399 	assert(dst_iovs && dst_iovcnt && src_iovs && src_iovcnt && key && block_size);
1400 
1401 	if (seq == NULL) {
1402 		seq = accel_sequence_get(accel_ch);
1403 		if (spdk_unlikely(seq == NULL)) {
1404 			return -ENOMEM;
1405 		}
1406 	}
1407 
1408 	assert(seq->ch == accel_ch);
1409 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1410 	if (spdk_unlikely(task == NULL)) {
1411 		if (*pseq == NULL) {
1412 			accel_sequence_put(seq);
1413 		}
1414 
1415 		return -ENOMEM;
1416 	}
1417 
1418 	task->crypto_key = key;
1419 	task->src_domain = src_domain;
1420 	task->src_domain_ctx = src_domain_ctx;
1421 	task->s.iovs = src_iovs;
1422 	task->s.iovcnt = src_iovcnt;
1423 	task->dst_domain = dst_domain;
1424 	task->dst_domain_ctx = dst_domain_ctx;
1425 	task->d.iovs = dst_iovs;
1426 	task->d.iovcnt = dst_iovcnt;
1427 	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
1428 	task->iv = iv;
1429 	task->block_size = block_size;
1430 	task->op_code = SPDK_ACCEL_OPC_DECRYPT;
1431 
1432 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1433 	*pseq = seq;
1434 
1435 	return 0;
1436 }
1437 
1438 int
1439 spdk_accel_append_crc32c(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1440 			 uint32_t *dst, struct iovec *iovs, uint32_t iovcnt,
1441 			 struct spdk_memory_domain *domain, void *domain_ctx,
1442 			 uint32_t seed, spdk_accel_step_cb cb_fn, void *cb_arg)
1443 {
1444 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1445 	struct spdk_accel_task *task;
1446 	struct spdk_accel_sequence *seq = *pseq;
1447 
1448 	if (seq == NULL) {
1449 		seq = accel_sequence_get(accel_ch);
1450 		if (spdk_unlikely(seq == NULL)) {
1451 			return -ENOMEM;
1452 		}
1453 	}
1454 
1455 	assert(seq->ch == accel_ch);
1456 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1457 	if (spdk_unlikely(task == NULL)) {
1458 		if (*pseq == NULL) {
1459 			accel_sequence_put(seq);
1460 		}
1461 
1462 		return -ENOMEM;
1463 	}
1464 
1465 	task->s.iovs = iovs;
1466 	task->s.iovcnt = iovcnt;
1467 	task->src_domain = domain;
1468 	task->src_domain_ctx = domain_ctx;
1469 	task->nbytes = accel_get_iovlen(iovs, iovcnt);
1470 	task->crc_dst = dst;
1471 	task->seed = seed;
1472 	task->op_code = SPDK_ACCEL_OPC_CRC32C;
1473 	task->dst_domain = NULL;
1474 
1475 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1476 	*pseq = seq;
1477 
1478 	return 0;
1479 }
1480 
1481 int
1482 spdk_accel_append_dif_verify(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1483 			     struct iovec *iovs, size_t iovcnt,
1484 			     struct spdk_memory_domain *domain, void *domain_ctx,
1485 			     uint32_t num_blocks,
1486 			     const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err,
1487 			     spdk_accel_step_cb cb_fn, void *cb_arg)
1488 {
1489 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1490 	struct spdk_accel_task *task;
1491 	struct spdk_accel_sequence *seq = *pseq;
1492 
1493 	if (seq == NULL) {
1494 		seq = accel_sequence_get(accel_ch);
1495 		if (spdk_unlikely(seq == NULL)) {
1496 			return -ENOMEM;
1497 		}
1498 	}
1499 
1500 	assert(seq->ch == accel_ch);
1501 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1502 	if (spdk_unlikely(task == NULL)) {
1503 		if (*pseq == NULL) {
1504 			accel_sequence_put(seq);
1505 		}
1506 
1507 		return -ENOMEM;
1508 	}
1509 
1510 	task->s.iovs = iovs;
1511 	task->s.iovcnt = iovcnt;
1512 	task->src_domain = domain;
1513 	task->src_domain_ctx = domain_ctx;
1514 	task->dst_domain = NULL;
1515 	task->dif.ctx = ctx;
1516 	task->dif.err = err;
1517 	task->dif.num_blocks = num_blocks;
1518 	task->nbytes = num_blocks * ctx->block_size;
1519 	task->op_code = SPDK_ACCEL_OPC_DIF_VERIFY;
1520 
1521 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1522 	*pseq = seq;
1523 
1524 	return 0;
1525 }
1526 
1527 int
1528 spdk_accel_append_dif_verify_copy(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1529 				  struct iovec *dst_iovs, size_t dst_iovcnt,
1530 				  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1531 				  struct iovec *src_iovs, size_t src_iovcnt,
1532 				  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1533 				  uint32_t num_blocks,
1534 				  const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err,
1535 				  spdk_accel_step_cb cb_fn, void *cb_arg)
1536 {
1537 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1538 	struct spdk_accel_task *task;
1539 	struct spdk_accel_sequence *seq = *pseq;
1540 
1541 	if (seq == NULL) {
1542 		seq = accel_sequence_get(accel_ch);
1543 		if (spdk_unlikely(seq == NULL)) {
1544 			return -ENOMEM;
1545 		}
1546 	}
1547 
1548 	assert(seq->ch == accel_ch);
1549 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1550 	if (spdk_unlikely(task == NULL)) {
1551 		if (*pseq == NULL) {
1552 			accel_sequence_put(seq);
1553 		}
1554 
1555 		return -ENOMEM;
1556 	}
1557 
1558 	task->dst_domain = dst_domain;
1559 	task->dst_domain_ctx = dst_domain_ctx;
1560 	task->d.iovs = dst_iovs;
1561 	task->d.iovcnt = dst_iovcnt;
1562 	task->src_domain = src_domain;
1563 	task->src_domain_ctx = src_domain_ctx;
1564 	task->s.iovs = src_iovs;
1565 	task->s.iovcnt = src_iovcnt;
1566 	task->dif.ctx = ctx;
1567 	task->dif.err = err;
1568 	task->dif.num_blocks = num_blocks;
1569 	task->nbytes = num_blocks * ctx->block_size;
1570 	task->op_code = SPDK_ACCEL_OPC_DIF_VERIFY_COPY;
1571 
1572 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1573 	*pseq = seq;
1574 
1575 	return 0;
1576 }
1577 
1578 int
1579 spdk_accel_append_dif_generate(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1580 			       struct iovec *iovs, size_t iovcnt,
1581 			       struct spdk_memory_domain *domain, void *domain_ctx,
1582 			       uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
1583 			       spdk_accel_step_cb cb_fn, void *cb_arg)
1584 {
1585 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1586 	struct spdk_accel_task *task;
1587 	struct spdk_accel_sequence *seq = *pseq;
1588 
1589 	if (seq == NULL) {
1590 		seq = accel_sequence_get(accel_ch);
1591 		if (spdk_unlikely(seq == NULL)) {
1592 			return -ENOMEM;
1593 		}
1594 	}
1595 
1596 	assert(seq->ch == accel_ch);
1597 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1598 	if (spdk_unlikely(task == NULL)) {
1599 		if (*pseq == NULL) {
1600 			accel_sequence_put(seq);
1601 		}
1602 
1603 		return -ENOMEM;
1604 	}
1605 
1606 	task->s.iovs = iovs;
1607 	task->s.iovcnt = iovcnt;
1608 	task->src_domain = domain;
1609 	task->src_domain_ctx = domain_ctx;
1610 	task->dst_domain = NULL;
1611 	task->dif.ctx = ctx;
1612 	task->dif.num_blocks = num_blocks;
1613 	task->nbytes = num_blocks * ctx->block_size;
1614 	task->op_code = SPDK_ACCEL_OPC_DIF_GENERATE;
1615 
1616 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1617 	*pseq = seq;
1618 
1619 	return 0;
1620 }
1621 
1622 int
1623 spdk_accel_append_dif_generate_copy(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1624 				    struct iovec *dst_iovs, size_t dst_iovcnt,
1625 				    struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1626 				    struct iovec *src_iovs, size_t src_iovcnt,
1627 				    struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1628 				    uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
1629 				    spdk_accel_step_cb cb_fn, void *cb_arg)
1630 {
1631 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1632 	struct spdk_accel_task *task;
1633 	struct spdk_accel_sequence *seq = *pseq;
1634 
1635 	if (seq == NULL) {
1636 		seq = accel_sequence_get(accel_ch);
1637 		if (spdk_unlikely(seq == NULL)) {
1638 			return -ENOMEM;
1639 		}
1640 	}
1641 
1642 	assert(seq->ch == accel_ch);
1643 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1644 	if (spdk_unlikely(task == NULL)) {
1645 		if (*pseq == NULL) {
1646 			accel_sequence_put(seq);
1647 		}
1648 
1649 		return -ENOMEM;
1650 	}
1651 
1652 	task->dst_domain = dst_domain;
1653 	task->dst_domain_ctx = dst_domain_ctx;
1654 	task->d.iovs = dst_iovs;
1655 	task->d.iovcnt = dst_iovcnt;
1656 	task->src_domain = src_domain;
1657 	task->src_domain_ctx = src_domain_ctx;
1658 	task->s.iovs = src_iovs;
1659 	task->s.iovcnt = src_iovcnt;
	task->dif.ctx = ctx;
	task->dif.num_blocks = num_blocks;
1660 	task->nbytes = num_blocks * ctx->block_size;
1661 	task->op_code = SPDK_ACCEL_OPC_DIF_GENERATE_COPY;
1662 
1663 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1664 	*pseq = seq;
1665 
1666 	return 0;
1667 }
1668 
1669 int
1670 spdk_accel_append_dix_generate(struct spdk_accel_sequence **seq, struct spdk_io_channel *ch,
1671 			       struct iovec *iovs, size_t iovcnt, struct spdk_memory_domain *domain,
1672 			       void *domain_ctx, struct iovec *md_iov,
1673 			       struct spdk_memory_domain *md_domain, void *md_domain_ctx,
1674 			       uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
1675 			       spdk_accel_step_cb cb_fn, void *cb_arg)
1676 {
1677 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1678 	struct spdk_accel_task *task;
1679 	struct spdk_accel_sequence *pseq = *seq;
1680 
1681 	if (pseq == NULL) {
1682 		pseq = accel_sequence_get(accel_ch);
1683 		if (spdk_unlikely(pseq == NULL)) {
1684 			return -ENOMEM;
1685 		}
1686 	}
1687 
1688 	assert(pseq->ch == accel_ch);
1689 	task = accel_sequence_get_task(accel_ch, pseq, cb_fn, cb_arg);
1690 	if (spdk_unlikely(task == NULL)) {
1691 		if (*seq == NULL) {
1692 			accel_sequence_put(pseq);
1693 		}
1694 
1695 		return -ENOMEM;
1696 	}
1697 
1698 	task->d.iovs = md_iov;
1699 	task->d.iovcnt = 1;
1700 	task->dst_domain = md_domain;
1701 	task->dst_domain_ctx = md_domain_ctx;
1702 	task->s.iovs = iovs;
1703 	task->s.iovcnt = iovcnt;
1704 	task->src_domain = domain;
1705 	task->src_domain_ctx = domain_ctx;
1706 	task->dif.ctx = ctx;
1707 	task->dif.num_blocks = num_blocks;
1708 	task->nbytes = num_blocks * ctx->block_size;
1709 	task->op_code = SPDK_ACCEL_OPC_DIX_GENERATE;
1710 
1711 	TAILQ_INSERT_TAIL(&pseq->tasks, task, seq_link);
1712 	*seq = pseq;
1713 
1714 	return 0;
1715 }
1716 
1717 int
1718 spdk_accel_append_dix_verify(struct spdk_accel_sequence **seq, struct spdk_io_channel *ch,
1719 			     struct iovec *iovs, size_t iovcnt, struct spdk_memory_domain *domain,
1720 			     void *domain_ctx, struct iovec *md_iov,
1721 			     struct spdk_memory_domain *md_domain, void *md_domain_ctx,
1722 			     uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
1723 			     struct spdk_dif_error *err, spdk_accel_step_cb cb_fn, void *cb_arg)
1724 {
1725 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1726 	struct spdk_accel_task *task;
1727 	struct spdk_accel_sequence *pseq = *seq;
1728 
1729 	if (pseq == NULL) {
1730 		pseq = accel_sequence_get(accel_ch);
1731 		if (spdk_unlikely(pseq == NULL)) {
1732 			return -ENOMEM;
1733 		}
1734 	}
1735 
1736 	assert(pseq->ch == accel_ch);
1737 	task = accel_sequence_get_task(accel_ch, pseq, cb_fn, cb_arg);
1738 	if (spdk_unlikely(task == NULL)) {
1739 		if (*seq == NULL) {
1740 			accel_sequence_put(pseq);
1741 		}
1742 
1743 		return -ENOMEM;
1744 	}
1745 
1746 	task->d.iovs = md_iov;
1747 	task->d.iovcnt = 1;
1748 	task->dst_domain = md_domain;
1749 	task->dst_domain_ctx = md_domain_ctx;
1750 	task->s.iovs = iovs;
1751 	task->s.iovcnt = iovcnt;
1752 	task->src_domain = domain;
1753 	task->src_domain_ctx = domain_ctx;
1754 	task->dif.ctx = ctx;
1755 	task->dif.err = err;
1756 	task->dif.num_blocks = num_blocks;
1757 	task->nbytes = num_blocks * ctx->block_size;
1758 	task->op_code = SPDK_ACCEL_OPC_DIX_VERIFY;
1759 
1760 	TAILQ_INSERT_TAIL(&pseq->tasks, task, seq_link);
1761 	*seq = pseq;
1762 
1763 	return 0;
1764 }
1765 
1766 int
1767 spdk_accel_get_buf(struct spdk_io_channel *ch, uint64_t len, void **buf,
1768 		   struct spdk_memory_domain **domain, void **domain_ctx)
1769 {
1770 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1771 	struct accel_buffer *accel_buf;
1772 
1773 	accel_buf = accel_get_buf(accel_ch, len);
1774 	if (spdk_unlikely(accel_buf == NULL)) {
1775 		return -ENOMEM;
1776 	}
1777 
1778 	accel_buf->ch = accel_ch;
1779 
1780 	/* We always return the same pointer and identify the buffers through domain_ctx */
1781 	*buf = ACCEL_BUFFER_BASE;
1782 	*domain_ctx = accel_buf;
1783 	*domain = g_accel_domain;
1784 
1785 	return 0;
1786 }
1787 
1788 void
1789 spdk_accel_put_buf(struct spdk_io_channel *ch, void *buf,
1790 		   struct spdk_memory_domain *domain, void *domain_ctx)
1791 {
1792 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1793 	struct accel_buffer *accel_buf = domain_ctx;
1794 
1795 	assert(domain == g_accel_domain);
1796 	assert(buf == ACCEL_BUFFER_BASE);
1797 
1798 	accel_put_buf(accel_ch, accel_buf);
1799 }
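/*
 * Minimal usage sketch (illustrative only; `ch` is assumed to be a channel
 * obtained from spdk_accel_get_io_channel()).  The returned pointer is the
 * virtual ACCEL_BUFFER_BASE address and is only meaningful together with the
 * returned domain/domain_ctx, e.g. when appended to a sequence:
 *
 *	void *buf;
 *	struct spdk_memory_domain *domain;
 *	void *domain_ctx;
 *
 *	if (spdk_accel_get_buf(ch, 4096, &buf, &domain, &domain_ctx) == 0) {
 *		// ... append operations using buf + domain/domain_ctx ...
 *		spdk_accel_put_buf(ch, buf, domain, domain_ctx);
 *	}
 */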
1800 
1801 static void
1802 accel_sequence_complete_task(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1803 {
1804 	struct accel_io_channel *ch = seq->ch;
1805 	spdk_accel_step_cb cb_fn;
1806 	void *cb_arg;
1807 
1808 	TAILQ_REMOVE(&seq->tasks, task, seq_link);
1809 	cb_fn = task->step_cb_fn;
1810 	cb_arg = task->cb_arg;
1811 	task->seq = NULL;
1812 	if (task->has_aux) {
1813 		SLIST_INSERT_HEAD(&ch->task_aux_data_pool, task->aux, link);
1814 		task->aux = NULL;
1815 		task->has_aux = false;
1816 	}
1817 
1818 	_put_task(ch, task);
1819 
1820 	if (cb_fn != NULL) {
1821 		cb_fn(cb_arg);
1822 	}
1823 }
1824 
1825 static void
1826 accel_sequence_complete_tasks(struct spdk_accel_sequence *seq)
1827 {
1828 	struct spdk_accel_task *task;
1829 
1830 	while (!TAILQ_EMPTY(&seq->tasks)) {
1831 		task = TAILQ_FIRST(&seq->tasks);
1832 		accel_sequence_complete_task(seq, task);
1833 	}
1834 }
1835 
1836 static void
1837 accel_sequence_complete(struct spdk_accel_sequence *seq)
1838 {
1839 	spdk_accel_completion_cb cb_fn = seq->cb_fn;
1840 	void *cb_arg = seq->cb_arg;
1841 	int status = seq->status;
1842 
1843 	SPDK_DEBUGLOG(accel, "Completed sequence: %p with status: %d\n", seq, status);
1844 
1845 	accel_update_stats(seq->ch, sequence_executed, 1);
1846 	if (spdk_unlikely(status != 0)) {
1847 		accel_update_stats(seq->ch, sequence_failed, 1);
1848 	}
1849 
1850 	/* First notify all users who appended operations to this sequence */
1851 	accel_sequence_complete_tasks(seq);
1852 	accel_sequence_put(seq);
1853 
1854 	/* Then notify the user who finished the sequence */
1855 	cb_fn(cb_arg, status);
1856 }
1857 
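/* Virtual accel-buffer addresses handed out by spdk_accel_get_buf() are
 * ACCEL_BUFFER_BASE plus an offset, so the offset into the real data buffer
 * can be recovered by masking off the base bit, e.g.
 * ((char *)ACCEL_BUFFER_BASE + 0x100) masked with ACCEL_BUFFER_OFFSET_MASK
 * yields 0x100.
 */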
1858 static void
1859 accel_update_virt_iov(struct iovec *diov, struct iovec *siov, struct accel_buffer *accel_buf)
1860 {
1861 	uintptr_t offset;
1862 
1863 	offset = (uintptr_t)siov->iov_base & ACCEL_BUFFER_OFFSET_MASK;
1864 	assert(offset < accel_buf->len);
1865 
1866 	diov->iov_base = (char *)accel_buf->buf + offset;
1867 	diov->iov_len = siov->iov_len;
1868 }
1869 
1870 static void
1871 accel_sequence_set_virtbuf(struct spdk_accel_sequence *seq, struct accel_buffer *buf)
1872 {
1873 	struct spdk_accel_task *task;
1874 	struct iovec *iov;
1875 
1876 	/* Now that we've allocated the actual data buffer for this accel_buffer, update all tasks
1877 	 * in the sequence that were using it.
1878 	 */
1879 	TAILQ_FOREACH(task, &seq->tasks, seq_link) {
1880 		if (task->src_domain == g_accel_domain && task->src_domain_ctx == buf) {
1881 			if (!task->has_aux) {
1882 				task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
1883 				assert(task->aux && "Can't allocate aux data structure");
1884 				task->has_aux = true;
1885 				SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
1886 			}
1887 
1888 			iov = &task->aux->iovs[SPDK_ACCEL_AXU_IOV_VIRT_SRC];
1889 			assert(task->s.iovcnt == 1);
1890 			accel_update_virt_iov(iov, &task->s.iovs[0], buf);
1891 			task->src_domain = NULL;
1892 			task->s.iovs = iov;
1893 		}
1894 		if (task->dst_domain == g_accel_domain && task->dst_domain_ctx == buf) {
1895 			if (!task->has_aux) {
1896 				task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
1897 				assert(task->aux && "Can't allocate aux data structure");
1898 				task->has_aux = true;
1899 				SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
1900 			}
1901 
1902 			iov = &task->aux->iovs[SPDK_ACCEL_AXU_IOV_VIRT_DST];
1903 			assert(task->d.iovcnt == 1);
1904 			accel_update_virt_iov(iov, &task->d.iovs[0], buf);
1905 			task->dst_domain = NULL;
1906 			task->d.iovs = iov;
1907 		}
1908 	}
1909 }
1910 
1911 static void accel_process_sequence(struct spdk_accel_sequence *seq);
1912 
1913 static void
1914 accel_iobuf_get_virtbuf_cb(struct spdk_iobuf_entry *entry, void *buf)
1915 {
1916 	struct accel_buffer *accel_buf;
1917 
1918 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1919 
1920 	assert(accel_buf->seq != NULL);
1921 	assert(accel_buf->buf == NULL);
1922 	accel_buf->buf = buf;
1923 
1924 	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
1925 	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
1926 	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
1927 	accel_process_sequence(accel_buf->seq);
1928 }
1929 
1930 static bool
1931 accel_sequence_alloc_buf(struct spdk_accel_sequence *seq, struct accel_buffer *buf,
1932 			 spdk_iobuf_get_cb cb_fn)
1933 {
1934 	struct accel_io_channel *ch = seq->ch;
1935 
1936 	assert(buf->seq == NULL);
1937 
1938 	buf->seq = seq;
1939 
1940 	/* The buffer might already be allocated by memory domain translation. */
1941 	if (buf->buf) {
1942 		return true;
1943 	}
1944 
1945 	buf->buf = spdk_iobuf_get(&ch->iobuf, buf->len, &buf->iobuf, cb_fn);
1946 	if (spdk_unlikely(buf->buf == NULL)) {
1947 		accel_update_stats(ch, retry.iobuf, 1);
1948 		return false;
1949 	}
1950 
1951 	return true;
1952 }
1953 
1954 static bool
1955 accel_sequence_check_virtbuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1956 {
1957 	/* If a task doesn't have dst/src (e.g. fill, crc32), its dst/src domain should be set to
1958 	 * NULL */
1959 	if (task->src_domain == g_accel_domain) {
1960 		if (!accel_sequence_alloc_buf(seq, task->src_domain_ctx,
1961 					      accel_iobuf_get_virtbuf_cb)) {
1962 			return false;
1963 		}
1964 
1965 		accel_sequence_set_virtbuf(seq, task->src_domain_ctx);
1966 	}
1967 
1968 	if (task->dst_domain == g_accel_domain) {
1969 		if (!accel_sequence_alloc_buf(seq, task->dst_domain_ctx,
1970 					      accel_iobuf_get_virtbuf_cb)) {
1971 			return false;
1972 		}
1973 
1974 		accel_sequence_set_virtbuf(seq, task->dst_domain_ctx);
1975 	}
1976 
1977 	return true;
1978 }
1979 
1980 static void
1981 accel_sequence_get_buf_cb(struct spdk_iobuf_entry *entry, void *buf)
1982 {
1983 	struct accel_buffer *accel_buf;
1984 
1985 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1986 
1987 	assert(accel_buf->seq != NULL);
1988 	assert(accel_buf->buf == NULL);
1989 	accel_buf->buf = buf;
1990 
1991 	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
1992 	accel_buf->cb_fn(accel_buf->seq, accel_buf->cb_ctx);
1993 }
1994 
1995 bool
1996 spdk_accel_alloc_sequence_buf(struct spdk_accel_sequence *seq, void *buf,
1997 			      struct spdk_memory_domain *domain, void *domain_ctx,
1998 			      spdk_accel_sequence_get_buf_cb cb_fn, void *cb_ctx)
1999 {
2000 	struct accel_buffer *accel_buf = domain_ctx;
2001 
2002 	assert(domain == g_accel_domain);
2003 	accel_buf->cb_fn = cb_fn;
2004 	accel_buf->cb_ctx = cb_ctx;
2005 
2006 	if (!accel_sequence_alloc_buf(seq, accel_buf, accel_sequence_get_buf_cb)) {
2007 		return false;
2008 	}
2009 
2010 	accel_sequence_set_virtbuf(seq, accel_buf);
2011 
2012 	return true;
2013 }
2014 
2015 struct spdk_accel_task *
2016 spdk_accel_sequence_first_task(struct spdk_accel_sequence *seq)
2017 {
2018 	return TAILQ_FIRST(&seq->tasks);
2019 }
2020 
2021 struct spdk_accel_task *
2022 spdk_accel_sequence_next_task(struct spdk_accel_task *task)
2023 {
2024 	return TAILQ_NEXT(task, seq_link);
2025 }
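/*
 * Illustrative driver-side sketch (not part of this file's implementation): a
 * driver's execute_sequence() callback would typically walk the outstanding
 * tasks with these accessors, execute the ones it can handle, and call
 * spdk_accel_sequence_continue() once it is done:
 *
 *	struct spdk_accel_task *task;
 *
 *	for (task = spdk_accel_sequence_first_task(seq); task != NULL;
 *	     task = spdk_accel_sequence_next_task(task)) {
 *		// inspect task->op_code and submit to hardware ...
 *	}
 */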
2026 
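/* Sketch of the swap performed by accel_set_bounce_buffer() below: a task whose
 * iovecs live in a remote memory domain ends up pointing at a single local
 * iovec { buf->buf, buf->len }, while the original vector and domain are
 * stashed in the bounce descriptor for the later pull/push.
 */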
2027 static inline void
2028 accel_set_bounce_buffer(struct spdk_accel_bounce_buffer *bounce, struct iovec **iovs,
2029 			uint32_t *iovcnt, struct spdk_memory_domain **domain, void **domain_ctx,
2030 			struct accel_buffer *buf)
2031 {
2032 	bounce->orig_iovs = *iovs;
2033 	bounce->orig_iovcnt = *iovcnt;
2034 	bounce->orig_domain = *domain;
2035 	bounce->orig_domain_ctx = *domain_ctx;
2036 	bounce->iov.iov_base = buf->buf;
2037 	bounce->iov.iov_len = buf->len;
2038 
2039 	*iovs = &bounce->iov;
2040 	*iovcnt = 1;
2041 	*domain = NULL;
2042 }
2043 
2044 static void
2045 accel_iobuf_get_src_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
2046 {
2047 	struct spdk_accel_task *task;
2048 	struct accel_buffer *accel_buf;
2049 
2050 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
2051 	assert(accel_buf->buf == NULL);
2052 	accel_buf->buf = buf;
2053 
2054 	task = TAILQ_FIRST(&accel_buf->seq->tasks);
2055 	assert(task != NULL);
2056 
2057 	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
2058 	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
2059 	assert(task->aux);
2060 	assert(task->has_aux);
2061 	accel_set_bounce_buffer(&task->aux->bounce.s, &task->s.iovs, &task->s.iovcnt, &task->src_domain,
2062 				&task->src_domain_ctx, accel_buf);
2063 	accel_process_sequence(accel_buf->seq);
2064 }
2065 
2066 static void
2067 accel_iobuf_get_dst_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
2068 {
2069 	struct spdk_accel_task *task;
2070 	struct accel_buffer *accel_buf;
2071 
2072 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
2073 	assert(accel_buf->buf == NULL);
2074 	accel_buf->buf = buf;
2075 
2076 	task = TAILQ_FIRST(&accel_buf->seq->tasks);
2077 	assert(task != NULL);
2078 
2079 	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
2080 	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
2081 	assert(task->aux);
2082 	assert(task->has_aux);
2083 	accel_set_bounce_buffer(&task->aux->bounce.d, &task->d.iovs, &task->d.iovcnt, &task->dst_domain,
2084 				&task->dst_domain_ctx, accel_buf);
2085 	accel_process_sequence(accel_buf->seq);
2086 }
2087 
2088 static int
2089 accel_sequence_check_bouncebuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
2090 {
2091 	struct accel_buffer *buf;
2092 
2093 	if (task->src_domain != NULL) {
2094 		/* By the time we're here, accel buffers should have been allocated */
2095 		assert(task->src_domain != g_accel_domain);
2096 
2097 		if (!task->has_aux) {
2098 			task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
2099 			if (spdk_unlikely(!task->aux)) {
2100 				SPDK_ERRLOG("Can't allocate aux data structure\n");
2101 				assert(0);
2102 				return -EAGAIN;
2103 			}
2104 			task->has_aux = true;
2105 			SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
2106 		}
2107 		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->s.iovs, task->s.iovcnt));
2108 		if (buf == NULL) {
2109 			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
2110 			return -ENOMEM;
2111 		}
2112 
2113 		SLIST_INSERT_HEAD(&seq->bounce_bufs, buf, link);
2114 		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_src_bounce_cb)) {
2115 			return -EAGAIN;
2116 		}
2117 
2118 		accel_set_bounce_buffer(&task->aux->bounce.s, &task->s.iovs, &task->s.iovcnt,
2119 					&task->src_domain, &task->src_domain_ctx, buf);
2120 	}
2121 
2122 	if (task->dst_domain != NULL) {
2123 		/* By the time we're here, accel buffers should have been allocated */
2124 		assert(task->dst_domain != g_accel_domain);
2125 
2126 		if (!task->has_aux) {
2127 			task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
2128 			if (spdk_unlikely(!task->aux)) {
2129 				SPDK_ERRLOG("Can't allocate aux data structure\n");
2130 				assert(0);
2131 				return -EAGAIN;
2132 			}
2133 			task->has_aux = true;
2134 			SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
2135 		}
2136 		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->d.iovs, task->d.iovcnt));
2137 		if (buf == NULL) {
2138 			/* The src bounce buffer will be released when the sequence is completed */
2139 			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
2140 			return -ENOMEM;
2141 		}
2142 
2143 		SLIST_INSERT_HEAD(&seq->bounce_bufs, buf, link);
2144 		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_dst_bounce_cb)) {
2145 			return -EAGAIN;
2146 		}
2147 
2148 		accel_set_bounce_buffer(&task->aux->bounce.d, &task->d.iovs, &task->d.iovcnt,
2149 					&task->dst_domain, &task->dst_domain_ctx, buf);
2150 	}
2151 
2152 	return 0;
2153 }
2154 
2155 static void
2156 accel_task_pull_data_cb(void *ctx, int status)
2157 {
2158 	struct spdk_accel_sequence *seq = ctx;
2159 
2160 	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
2161 	if (spdk_likely(status == 0)) {
2162 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
2163 	} else {
2164 		accel_sequence_set_fail(seq, status);
2165 	}
2166 
2167 	accel_process_sequence(seq);
2168 }
2169 
2170 static void
2171 accel_task_pull_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
2172 {
2173 	int rc;
2174 
2175 	assert(task->has_aux);
2176 	assert(task->aux);
2177 	assert(task->aux->bounce.s.orig_iovs != NULL);
2178 	assert(task->aux->bounce.s.orig_domain != NULL);
2179 	assert(task->aux->bounce.s.orig_domain != g_accel_domain);
2180 	assert(!g_modules_opc[task->op_code].supports_memory_domains);
2181 
2182 	rc = spdk_memory_domain_pull_data(task->aux->bounce.s.orig_domain,
2183 					  task->aux->bounce.s.orig_domain_ctx,
2184 					  task->aux->bounce.s.orig_iovs, task->aux->bounce.s.orig_iovcnt,
2185 					  task->s.iovs, task->s.iovcnt,
2186 					  accel_task_pull_data_cb, seq);
2187 	if (spdk_unlikely(rc != 0)) {
2188 		SPDK_ERRLOG("Failed to pull data from memory domain: %s, rc: %d\n",
2189 			    spdk_memory_domain_get_dma_device_id(task->aux->bounce.s.orig_domain), rc);
2190 		accel_sequence_set_fail(seq, rc);
2191 	}
2192 }
2193 
2194 static void
2195 accel_task_push_data_cb(void *ctx, int status)
2196 {
2197 	struct spdk_accel_sequence *seq = ctx;
2198 
2199 	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
2200 	if (spdk_likely(status == 0)) {
2201 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
2202 	} else {
2203 		accel_sequence_set_fail(seq, status);
2204 	}
2205 
2206 	accel_process_sequence(seq);
2207 }
2208 
2209 static void
2210 accel_task_push_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
2211 {
2212 	int rc;
2213 
2214 	assert(task->has_aux);
2215 	assert(task->aux);
2216 	assert(task->aux->bounce.d.orig_iovs != NULL);
2217 	assert(task->aux->bounce.d.orig_domain != NULL);
2218 	assert(task->aux->bounce.d.orig_domain != g_accel_domain);
2219 	assert(!g_modules_opc[task->op_code].supports_memory_domains);
2220 
2221 	rc = spdk_memory_domain_push_data(task->aux->bounce.d.orig_domain,
2222 					  task->aux->bounce.d.orig_domain_ctx,
2223 					  task->aux->bounce.d.orig_iovs, task->aux->bounce.d.orig_iovcnt,
2224 					  task->d.iovs, task->d.iovcnt,
2225 					  accel_task_push_data_cb, seq);
2226 	if (spdk_unlikely(rc != 0)) {
2227 		SPDK_ERRLOG("Failed to push data to memory domain: %s, rc: %d\n",
2228 			    spdk_memory_domain_get_dma_device_id(task->aux->bounce.d.orig_domain), rc);
2229 		accel_sequence_set_fail(seq, rc);
2230 	}
2231 }
2232 
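/*
 * Sketch of the common (non-driver) path through the state machine implemented
 * below; the AWAIT_* states simply wait for an asynchronous completion:
 *
 *	INIT -> CHECK_VIRTBUF -> CHECK_BOUNCEBUF -> [PULL_DATA ->] EXEC_TASK ->
 *	COMPLETE_TASK -> [PUSH_DATA ->] NEXT_TASK -> INIT (or sequence completion)
 */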
2233 static void
2234 accel_process_sequence(struct spdk_accel_sequence *seq)
2235 {
2236 	struct accel_io_channel *accel_ch = seq->ch;
2237 	struct spdk_accel_task *task;
2238 	enum accel_sequence_state state;
2239 	int rc;
2240 
2241 	/* Prevent recursive calls to this function */
2242 	if (spdk_unlikely(seq->in_process_sequence)) {
2243 		return;
2244 	}
2245 	seq->in_process_sequence = true;
2246 
2247 	task = TAILQ_FIRST(&seq->tasks);
2248 	do {
2249 		state = seq->state;
2250 		switch (state) {
2251 		case ACCEL_SEQUENCE_STATE_INIT:
2252 			if (g_accel_driver != NULL) {
2253 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS);
2254 				break;
2255 			}
2256 		/* Fall through */
2257 		case ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF:
2258 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
2259 			if (!accel_sequence_check_virtbuf(seq, task)) {
2260 				/* We couldn't allocate a buffer, wait until one is available */
2261 				break;
2262 			}
2263 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
2264 		/* Fall through */
2265 		case ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF:
2266 			/* If a module supports memory domains, we don't need to allocate bounce
2267 			 * buffers */
2268 			if (g_modules_opc[task->op_code].supports_memory_domains) {
2269 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
2270 				break;
2271 			}
2272 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
2273 			rc = accel_sequence_check_bouncebuf(seq, task);
2274 			if (spdk_unlikely(rc != 0)) {
2275 				/* We couldn't allocate a buffer, wait until one is available */
2276 				if (rc == -EAGAIN) {
2277 					break;
2278 				}
2279 				accel_sequence_set_fail(seq, rc);
2280 				break;
2281 			}
2282 			if (task->has_aux && task->s.iovs == &task->aux->bounce.s.iov) {
2283 				assert(task->aux->bounce.s.orig_iovs);
2284 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PULL_DATA);
2285 				break;
2286 			}
2287 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
2288 		/* Fall through */
2289 		case ACCEL_SEQUENCE_STATE_EXEC_TASK:
2290 			SPDK_DEBUGLOG(accel, "Executing %s operation, sequence: %p\n",
2291 				      g_opcode_strings[task->op_code], seq);
2292 
2293 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_TASK);
2294 			rc = accel_submit_task(accel_ch, task);
2295 			if (spdk_unlikely(rc != 0)) {
2296 				SPDK_ERRLOG("Failed to submit %s operation, sequence: %p\n",
2297 					    g_opcode_strings[task->op_code], seq);
2298 				accel_sequence_set_fail(seq, rc);
2299 			}
2300 			break;
2301 		case ACCEL_SEQUENCE_STATE_PULL_DATA:
2302 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
2303 			accel_task_pull_data(seq, task);
2304 			break;
2305 		case ACCEL_SEQUENCE_STATE_COMPLETE_TASK:
2306 			if (task->has_aux && task->d.iovs == &task->aux->bounce.d.iov) {
2307 				assert(task->aux->bounce.d.orig_iovs);
2308 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PUSH_DATA);
2309 				break;
2310 			}
2311 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
2312 			break;
2313 		case ACCEL_SEQUENCE_STATE_PUSH_DATA:
2314 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
2315 			accel_task_push_data(seq, task);
2316 			break;
2317 		case ACCEL_SEQUENCE_STATE_NEXT_TASK:
2318 			accel_sequence_complete_task(seq, task);
2319 			/* Check if there are any remaining tasks */
2320 			task = TAILQ_FIRST(&seq->tasks);
2321 			if (task == NULL) {
2322 				/* Immediately return here to make sure we don't touch the sequence
2323 				 * after it's completed */
2324 				accel_sequence_complete(seq);
2325 				return;
2326 			}
2327 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_INIT);
2328 			break;
2329 		case ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS:
2330 			assert(!TAILQ_EMPTY(&seq->tasks));
2331 
2332 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS);
2333 			rc = g_accel_driver->execute_sequence(accel_ch->driver_channel, seq);
2334 			if (spdk_unlikely(rc != 0)) {
2335 				SPDK_ERRLOG("Failed to execute sequence: %p using driver: %s\n",
2336 					    seq, g_accel_driver->name);
2337 				accel_sequence_set_fail(seq, rc);
2338 			}
2339 			break;
2340 		case ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS:
2341 			/* Get the task again, as the driver might have completed some tasks
2342 			 * synchronously */
2343 			task = TAILQ_FIRST(&seq->tasks);
2344 			if (task == NULL) {
2345 				/* Immediately return here to make sure we don't touch the sequence
2346 				 * after it's completed */
2347 				accel_sequence_complete(seq);
2348 				return;
2349 			}
2350 			/* We don't want to execute the next task through the driver, so we
2351 			 * explicitly omit the INIT state here */
2352 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
2353 			break;
2354 		case ACCEL_SEQUENCE_STATE_ERROR:
2355 			/* Immediately return here to make sure we don't touch the sequence
2356 			 * after it's completed */
2357 			assert(seq->status != 0);
2358 			accel_sequence_complete(seq);
2359 			return;
2360 		case ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF:
2361 		case ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF:
2362 		case ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA:
2363 		case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
2364 		case ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA:
2365 		case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS:
2366 			break;
2367 		default:
2368 			assert(0 && "bad state");
2369 			break;
2370 		}
2371 	} while (seq->state != state);
2372 
2373 	seq->in_process_sequence = false;
2374 }
2375 
2376 static void
2377 accel_sequence_task_cb(void *cb_arg, int status)
2378 {
2379 	struct spdk_accel_sequence *seq = cb_arg;
2380 	struct spdk_accel_task *task = TAILQ_FIRST(&seq->tasks);
2381 
2382 	switch (seq->state) {
2383 	case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
2384 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_COMPLETE_TASK);
2385 		if (spdk_unlikely(status != 0)) {
2386 			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p\n",
2387 				    g_opcode_strings[task->op_code], seq);
2388 			accel_sequence_set_fail(seq, status);
2389 		}
2390 
2391 		accel_process_sequence(seq);
2392 		break;
2393 	case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS:
2394 		assert(g_accel_driver != NULL);
2395 		/* Immediately remove the task from the outstanding list to make sure the next call
2396 		 * to spdk_accel_sequence_first_task() doesn't return it */
2397 		accel_sequence_complete_task(seq, task);
2398 		if (spdk_unlikely(status != 0)) {
2399 			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p through "
2400 				    "driver: %s\n", g_opcode_strings[task->op_code], seq,
2401 				    g_accel_driver->name);
2402 			/* Update status without using accel_sequence_set_fail() to avoid changing
2403 			 * seq's state to ERROR until the driver calls spdk_accel_sequence_continue() */
2404 			seq->status = status;
2405 		}
2406 		break;
2407 	default:
2408 		assert(0 && "bad state");
2409 		break;
2410 	}
2411 }
2412 
2413 void
2414 spdk_accel_sequence_continue(struct spdk_accel_sequence *seq)
2415 {
2416 	assert(g_accel_driver != NULL);
2417 	assert(seq->state == ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS);
2418 
2419 	if (spdk_likely(seq->status == 0)) {
2420 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS);
2421 	} else {
2422 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
2423 	}
2424 
2425 	accel_process_sequence(seq);
2426 }
2427 
2428 static bool
2429 accel_compare_iovs(struct iovec *iova, uint32_t iovacnt, struct iovec *iovb, uint32_t iovbcnt)
2430 {
2431 	/* For now, just do a simple check that the iovec arrays are exactly the same */
2432 	if (iovacnt != iovbcnt) {
2433 		return false;
2434 	}
2435 
2436 	return memcmp(iova, iovb, sizeof(*iova) * iovacnt) == 0;
2437 }
2438 
2439 static bool
2440 accel_task_set_dstbuf(struct spdk_accel_task *task, struct spdk_accel_task *next)
2441 {
2442 	struct spdk_accel_task *prev;
2443 
2444 	switch (task->op_code) {
2445 	case SPDK_ACCEL_OPC_DECOMPRESS:
2446 	case SPDK_ACCEL_OPC_FILL:
2447 	case SPDK_ACCEL_OPC_ENCRYPT:
2448 	case SPDK_ACCEL_OPC_DECRYPT:
2449 	case SPDK_ACCEL_OPC_DIF_GENERATE_COPY:
2450 	case SPDK_ACCEL_OPC_DIF_VERIFY_COPY:
2451 		if (task->dst_domain != next->src_domain) {
2452 			return false;
2453 		}
2454 		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
2455 					next->s.iovs, next->s.iovcnt)) {
2456 			return false;
2457 		}
2458 		task->d.iovs = next->d.iovs;
2459 		task->d.iovcnt = next->d.iovcnt;
2460 		task->dst_domain = next->dst_domain;
2461 		task->dst_domain_ctx = next->dst_domain_ctx;
2462 		break;
2463 	case SPDK_ACCEL_OPC_CRC32C:
2464 	case SPDK_ACCEL_OPC_DIX_GENERATE:
2465 	case SPDK_ACCEL_OPC_DIX_VERIFY:
2466 		/* crc32 and dix_generate/verify are special because they do not have a dst buffer */
2467 		if (task->src_domain != next->src_domain) {
2468 			return false;
2469 		}
2470 		if (!accel_compare_iovs(task->s.iovs, task->s.iovcnt,
2471 					next->s.iovs, next->s.iovcnt)) {
2472 			return false;
2473 		}
2474 		/* We can only change the operation's buffer if we can change the previous task's buffer */
2475 		prev = TAILQ_PREV(task, accel_sequence_tasks, seq_link);
2476 		if (prev == NULL) {
2477 			return false;
2478 		}
2479 		if (!accel_task_set_dstbuf(prev, next)) {
2480 			return false;
2481 		}
2482 		task->s.iovs = next->d.iovs;
2483 		task->s.iovcnt = next->d.iovcnt;
2484 		task->src_domain = next->dst_domain;
2485 		task->src_domain_ctx = next->dst_domain_ctx;
2486 		break;
2487 	default:
2488 		return false;
2489 	}
2490 
2491 	return true;
2492 }
2493 
2494 static void
2495 accel_sequence_merge_tasks(struct spdk_accel_sequence *seq, struct spdk_accel_task *task,
2496 			   struct spdk_accel_task **next_task)
2497 {
2498 	struct spdk_accel_task *next = *next_task;
2499 
2500 	switch (task->op_code) {
2501 	case SPDK_ACCEL_OPC_COPY:
2502 		/* We only allow changing the src of operations that actually have a src, e.g. we
2503 		 * never do it for fill.  Theoretically it is possible, but we'd have to be careful
2504 		 * to change the src of the operation after the fill (which in turn could also be a
2505 		 * fill).  So, for the sake of simplicity, skip this type of operation for now.
2506 		 */
2507 		if (next->op_code != SPDK_ACCEL_OPC_DECOMPRESS &&
2508 		    next->op_code != SPDK_ACCEL_OPC_COPY &&
2509 		    next->op_code != SPDK_ACCEL_OPC_ENCRYPT &&
2510 		    next->op_code != SPDK_ACCEL_OPC_DECRYPT &&
2511 		    next->op_code != SPDK_ACCEL_OPC_COPY_CRC32C &&
2512 		    next->op_code != SPDK_ACCEL_OPC_DIF_GENERATE_COPY &&
2513 		    next->op_code != SPDK_ACCEL_OPC_DIF_VERIFY_COPY) {
2514 			break;
2515 		}
2516 		if (task->dst_domain != next->src_domain) {
2517 			break;
2518 		}
2519 		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
2520 					next->s.iovs, next->s.iovcnt)) {
2521 			break;
2522 		}
2523 		next->s.iovs = task->s.iovs;
2524 		next->s.iovcnt = task->s.iovcnt;
2525 		next->src_domain = task->src_domain;
2526 		next->src_domain_ctx = task->src_domain_ctx;
2527 		accel_sequence_complete_task(seq, task);
2528 		break;
2529 	case SPDK_ACCEL_OPC_DECOMPRESS:
2530 	case SPDK_ACCEL_OPC_FILL:
2531 	case SPDK_ACCEL_OPC_ENCRYPT:
2532 	case SPDK_ACCEL_OPC_DECRYPT:
2533 	case SPDK_ACCEL_OPC_CRC32C:
2534 	case SPDK_ACCEL_OPC_DIF_GENERATE_COPY:
2535 	case SPDK_ACCEL_OPC_DIF_VERIFY_COPY:
2536 	case SPDK_ACCEL_OPC_DIX_GENERATE:
2537 	case SPDK_ACCEL_OPC_DIX_VERIFY:
2538 		/* We can only merge tasks when one of them is a copy */
2539 		if (next->op_code != SPDK_ACCEL_OPC_COPY) {
2540 			break;
2541 		}
2542 		if (!accel_task_set_dstbuf(task, next)) {
2543 			break;
2544 		}
2545 		/* We're removing next_task from the tasks queue, so we need to update its pointer,
2546 		 * so that the TAILQ_FOREACH_SAFE() loop in spdk_accel_sequence_finish() works correctly */
2547 		*next_task = TAILQ_NEXT(next, seq_link);
2548 		accel_sequence_complete_task(seq, next);
2549 		break;
2550 	default:
2551 		assert(0 && "bad opcode");
2552 		break;
2553 	}
2554 }
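/*
 * Illustrative effect of the merging above (assuming matching buffers and
 * memory domains throughout): a sequence built as
 *
 *	copy(A -> B), decompress(B -> C), copy(C -> D)
 *
 * collapses into a single decompress(A -> D), since the leading copy's src is
 * propagated forward and the trailing copy's dst is propagated backward.
 */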
2555 
2556 void
2557 spdk_accel_sequence_finish(struct spdk_accel_sequence *seq,
2558 			   spdk_accel_completion_cb cb_fn, void *cb_arg)
2559 {
2560 	struct spdk_accel_task *task, *next;
2561 
2562 	/* Try to remove any copy operations if possible */
2563 	TAILQ_FOREACH_SAFE(task, &seq->tasks, seq_link, next) {
2564 		if (next == NULL) {
2565 			break;
2566 		}
2567 		accel_sequence_merge_tasks(seq, task, &next);
2568 	}
2569 
2570 	seq->cb_fn = cb_fn;
2571 	seq->cb_arg = cb_arg;
2572 
2573 	accel_process_sequence(seq);
2574 }
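/*
 * Minimal usage sketch (illustrative; `ch`, the iovecs, `finish_cb` and
 * `cb_arg` are assumed to be provided by the caller, and return codes should
 * be checked):
 *
 *	struct spdk_accel_sequence *seq = NULL;
 *
 *	spdk_accel_append_copy(&seq, ch, dst_iovs, dst_iovcnt, NULL, NULL,
 *			       src_iovs, src_iovcnt, NULL, NULL, NULL, NULL);
 *	spdk_accel_sequence_finish(seq, finish_cb, cb_arg);
 */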
2575 
2576 void
2577 spdk_accel_sequence_reverse(struct spdk_accel_sequence *seq)
2578 {
2579 	struct accel_sequence_tasks tasks = TAILQ_HEAD_INITIALIZER(tasks);
2580 	struct spdk_accel_task *task;
2581 
2582 	TAILQ_SWAP(&tasks, &seq->tasks, spdk_accel_task, seq_link);
2583 
2584 	while (!TAILQ_EMPTY(&tasks)) {
2585 		task = TAILQ_FIRST(&tasks);
2586 		TAILQ_REMOVE(&tasks, task, seq_link);
2587 		TAILQ_INSERT_HEAD(&seq->tasks, task, seq_link);
2588 	}
2589 }
2590 
2591 void
2592 spdk_accel_sequence_abort(struct spdk_accel_sequence *seq)
2593 {
2594 	if (seq == NULL) {
2595 		return;
2596 	}
2597 
2598 	accel_sequence_complete_tasks(seq);
2599 	accel_sequence_put(seq);
2600 }
2601 
2602 struct spdk_memory_domain *
2603 spdk_accel_get_memory_domain(void)
2604 {
2605 	return g_accel_domain;
2606 }
2607 
2608 static struct spdk_accel_module_if *
2609 _module_find_by_name(const char *name)
2610 {
2611 	struct spdk_accel_module_if *accel_module = NULL;
2612 
2613 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
2614 		if (strcmp(name, accel_module->name) == 0) {
2615 			break;
2616 		}
2617 	}
2618 
2619 	return accel_module;
2620 }
2621 
2622 static inline struct spdk_accel_crypto_key *
2623 _accel_crypto_key_get(const char *name)
2624 {
2625 	struct spdk_accel_crypto_key *key;
2626 
2627 	assert(spdk_spin_held(&g_keyring_spin));
2628 
2629 	TAILQ_FOREACH(key, &g_keyring, link) {
2630 		if (strcmp(name, key->param.key_name) == 0) {
2631 			return key;
2632 		}
2633 	}
2634 
2635 	return NULL;
2636 }
2637 
2638 static void
2639 accel_crypto_key_free_mem(struct spdk_accel_crypto_key *key)
2640 {
2641 	if (key->param.hex_key) {
2642 		spdk_memset_s(key->param.hex_key, key->key_size * 2, 0, key->key_size * 2);
2643 		free(key->param.hex_key);
2644 	}
2645 	if (key->param.hex_key2) {
2646 		spdk_memset_s(key->param.hex_key2, key->key2_size * 2, 0, key->key2_size * 2);
2647 		free(key->param.hex_key2);
2648 	}
2649 	free(key->param.tweak_mode);
2650 	free(key->param.key_name);
2651 	free(key->param.cipher);
2652 	if (key->key) {
2653 		spdk_memset_s(key->key, key->key_size, 0, key->key_size);
2654 		free(key->key);
2655 	}
2656 	if (key->key2) {
2657 		spdk_memset_s(key->key2, key->key2_size, 0, key->key2_size);
2658 		free(key->key2);
2659 	}
2660 	free(key);
2661 }
2662 
2663 static void
2664 accel_crypto_key_destroy_unsafe(struct spdk_accel_crypto_key *key)
2665 {
2666 	assert(key->module_if);
2667 	assert(key->module_if->crypto_key_deinit);
2668 
2669 	key->module_if->crypto_key_deinit(key);
2670 	accel_crypto_key_free_mem(key);
2671 }
2672 
2673 /*
2674  * This function mitigates a timing side channel which could be caused by using strcmp().
2675  * Please refer to the chapter "Mitigating Information Leakage Based on Variable Timing" in
2676  * the article [1] for more details.
2677  * [1] https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/secure-coding/mitigate-timing-side-channel-crypto-implementation.html
2678  */
2679 static bool
2680 accel_aes_xts_keys_equal(const char *k1, size_t k1_len, const char *k2, size_t k2_len)
2681 {
2682 	size_t i;
2683 	volatile size_t x = k1_len ^ k2_len;
2684 
2685 	for (i = 0; ((i < k1_len) & (i < k2_len)); i++) {
2686 		x |= k1[i] ^ k2[i];
2687 	}
2688 
2689 	return x == 0;
2690 }
2691 
2692 static const char *g_tweak_modes[] = {
2693 	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA] = "SIMPLE_LBA",
2694 	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_JOIN_NEG_LBA_WITH_LBA] = "JOIN_NEG_LBA_WITH_LBA",
2695 	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_INCR_512_FULL_LBA] = "INCR_512_FULL_LBA",
2696 	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_INCR_512_UPPER_LBA] = "INCR_512_UPPER_LBA",
2697 };
2698 
2699 static const char *g_ciphers[] = {
2700 	[SPDK_ACCEL_CIPHER_AES_CBC] = "AES_CBC",
2701 	[SPDK_ACCEL_CIPHER_AES_XTS] = "AES_XTS",
2702 };
2703 
2704 int
2705 spdk_accel_crypto_key_create(const struct spdk_accel_crypto_key_create_param *param)
2706 {
2707 	struct spdk_accel_module_if *module;
2708 	struct spdk_accel_crypto_key *key;
2709 	size_t hex_key_size, hex_key2_size;
2710 	bool found = false;
2711 	size_t i;
2712 	int rc;
2713 
2714 	if (!param || !param->hex_key || !param->cipher || !param->key_name) {
2715 		return -EINVAL;
2716 	}
2717 
2718 	if (g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module != g_modules_opc[SPDK_ACCEL_OPC_DECRYPT].module) {
2719 		/* hardly ever possible, but let's check and warn the user */
2720 		SPDK_ERRLOG("Different accel modules are used for encryption and decryption\n");
2721 	}
2722 	module = g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module;
2723 
2724 	if (!module) {
2725 		SPDK_ERRLOG("No accel module found assigned for crypto operation\n");
2726 		return -ENOENT;
2727 	}
2728 
2729 	if (!module->crypto_key_init || !module->crypto_supports_cipher) {
2730 		SPDK_ERRLOG("Module %s doesn't support crypto operations\n", module->name);
2731 		return -ENOTSUP;
2732 	}
2733 
2734 	key = calloc(1, sizeof(*key));
2735 	if (!key) {
2736 		return -ENOMEM;
2737 	}
2738 
2739 	key->param.key_name = strdup(param->key_name);
2740 	if (!key->param.key_name) {
2741 		rc = -ENOMEM;
2742 		goto error;
2743 	}
2744 
2745 	for (i = 0; i < SPDK_COUNTOF(g_ciphers); ++i) {
2746 		assert(g_ciphers[i]);
2747 
2748 		if (strncmp(param->cipher, g_ciphers[i], strlen(g_ciphers[i])) == 0) {
2749 			key->cipher = i;
2750 			found = true;
2751 			break;
2752 		}
2753 	}
2754 
2755 	if (!found) {
2756 		SPDK_ERRLOG("Failed to parse cipher\n");
2757 		rc = -EINVAL;
2758 		goto error;
2759 	}
2760 
2761 	key->param.cipher = strdup(param->cipher);
2762 	if (!key->param.cipher) {
2763 		rc = -ENOMEM;
2764 		goto error;
2765 	}
2766 
2767 	hex_key_size = strnlen(param->hex_key, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
2768 	if (hex_key_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
2769 		SPDK_ERRLOG("key1 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
2770 		rc = -EINVAL;
2771 		goto error;
2772 	}
2773 
2774 	if (hex_key_size == 0) {
2775 		SPDK_ERRLOG("key1 size cannot be 0\n");
2776 		rc = -EINVAL;
2777 		goto error;
2778 	}
2779 
2780 	key->param.hex_key = strdup(param->hex_key);
2781 	if (!key->param.hex_key) {
2782 		rc = -ENOMEM;
2783 		goto error;
2784 	}
2785 
2786 	key->key_size = hex_key_size / 2;
2787 	key->key = spdk_unhexlify(key->param.hex_key);
2788 	if (!key->key) {
2789 		SPDK_ERRLOG("Failed to unhexlify key1\n");
2790 		rc = -EINVAL;
2791 		goto error;
2792 	}
2793 
2794 	if (param->hex_key2) {
2795 		hex_key2_size = strnlen(param->hex_key2, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
2796 		if (hex_key2_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
2797 			SPDK_ERRLOG("key2 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
2798 			rc = -EINVAL;
2799 			goto error;
2800 		}
2801 
2802 		if (hex_key2_size == 0) {
2803 			SPDK_ERRLOG("key2 size cannot be 0\n");
2804 			rc = -EINVAL;
2805 			goto error;
2806 		}
2807 
2808 		key->param.hex_key2 = strdup(param->hex_key2);
2809 		if (!key->param.hex_key2) {
2810 			rc = -ENOMEM;
2811 			goto error;
2812 		}
2813 
2814 		key->key2_size = hex_key2_size / 2;
2815 		key->key2 = spdk_unhexlify(key->param.hex_key2);
2816 		if (!key->key2) {
2817 			SPDK_ERRLOG("Failed to unhexlify key2\n");
2818 			rc = -EINVAL;
2819 			goto error;
2820 		}
2821 	}
2822 
2823 	key->tweak_mode = ACCEL_CRYPTO_TWEAK_MODE_DEFAULT;
2824 	if (param->tweak_mode) {
2825 		found = false;
2826 
2827 		key->param.tweak_mode = strdup(param->tweak_mode);
2828 		if (!key->param.tweak_mode) {
2829 			rc = -ENOMEM;
2830 			goto error;
2831 		}
2832 
2833 		for (i = 0; i < SPDK_COUNTOF(g_tweak_modes); ++i) {
2834 			assert(g_tweak_modes[i]);
2835 
2836 			if (strncmp(param->tweak_mode, g_tweak_modes[i], strlen(g_tweak_modes[i])) == 0) {
2837 				key->tweak_mode = i;
2838 				found = true;
2839 				break;
2840 			}
2841 		}
2842 
2843 		if (!found) {
2844 			SPDK_ERRLOG("Failed to parse tweak mode\n");
2845 			rc = -EINVAL;
2846 			goto error;
2847 		}
2848 	}
2849 
2850 	if ((!module->crypto_supports_tweak_mode && key->tweak_mode != ACCEL_CRYPTO_TWEAK_MODE_DEFAULT) ||
2851 	    (module->crypto_supports_tweak_mode && !module->crypto_supports_tweak_mode(key->tweak_mode))) {
2852 		SPDK_ERRLOG("Module %s doesn't support %s tweak mode\n", module->name,
2853 			    g_tweak_modes[key->tweak_mode]);
2854 		rc = -EINVAL;
2855 		goto error;
2856 	}
2857 
2858 	if (!module->crypto_supports_cipher(key->cipher, key->key_size)) {
2859 		SPDK_ERRLOG("Module %s doesn't support %s cipher with %zu key size\n", module->name,
2860 			    g_ciphers[key->cipher], key->key_size);
2861 		rc = -EINVAL;
2862 		goto error;
2863 	}
2864 
2865 	if (key->cipher == SPDK_ACCEL_CIPHER_AES_XTS) {
2866 		if (!key->key2) {
2867 			SPDK_ERRLOG("%s key2 is missing\n", g_ciphers[key->cipher]);
2868 			rc = -EINVAL;
2869 			goto error;
2870 		}
2871 
2872 		if (key->key_size != key->key2_size) {
2873 			SPDK_ERRLOG("%s key size %zu is not equal to key2 size %zu\n", g_ciphers[key->cipher],
2874 				    key->key_size,
2875 				    key->key2_size);
2876 			rc = -EINVAL;
2877 			goto error;
2878 		}
2879 
2880 		if (accel_aes_xts_keys_equal(key->key, key->key_size, key->key2, key->key2_size)) {
2881 			SPDK_ERRLOG("%s identical keys are not secure\n", g_ciphers[key->cipher]);
2882 			rc = -EINVAL;
2883 			goto error;
2884 		}
2885 	}
2886 
2887 	if (key->cipher == SPDK_ACCEL_CIPHER_AES_CBC) {
2888 		if (key->key2_size) {
2889 			SPDK_ERRLOG("%s doesn't use key2\n", g_ciphers[key->cipher]);
2890 			rc = -EINVAL;
2891 			goto error;
2892 		}
2893 	}
2894 
2895 	key->module_if = module;
2896 
2897 	spdk_spin_lock(&g_keyring_spin);
2898 	if (_accel_crypto_key_get(param->key_name)) {
2899 		rc = -EEXIST;
2900 	} else {
2901 		rc = module->crypto_key_init(key);
2902 		if (rc) {
2903 			SPDK_ERRLOG("Module %s failed to initialize crypto key\n", module->name);
2904 		} else {
2905 			TAILQ_INSERT_TAIL(&g_keyring, key, link);
2906 		}
2907 	}
2908 	spdk_spin_unlock(&g_keyring_spin);
2909 
2910 	if (rc) {
2911 		goto error;
2912 	}
2913 
2914 	return 0;
2915 
2916 error:
2917 	accel_crypto_key_free_mem(key);
2918 	return rc;
2919 }
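/*
 * Illustrative key-creation sketch (hex strings shortened here; real AES-XTS
 * keys must be full-length, valid hex and non-identical):
 *
 *	struct spdk_accel_crypto_key_create_param param = {
 *		.key_name = "my_key",
 *		.cipher = "AES_XTS",
 *		.hex_key = "00112233...",
 *		.hex_key2 = "ffeeddcc...",
 *		.tweak_mode = "SIMPLE_LBA",
 *	};
 *
 *	rc = spdk_accel_crypto_key_create(&param);
 */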
2920 
2921 int
2922 spdk_accel_crypto_key_destroy(struct spdk_accel_crypto_key *key)
2923 {
2924 	if (!key || !key->module_if) {
2925 		return -EINVAL;
2926 	}
2927 
2928 	spdk_spin_lock(&g_keyring_spin);
2929 	if (!_accel_crypto_key_get(key->param.key_name)) {
2930 		spdk_spin_unlock(&g_keyring_spin);
2931 		return -ENOENT;
2932 	}
2933 	TAILQ_REMOVE(&g_keyring, key, link);
2934 	spdk_spin_unlock(&g_keyring_spin);
2935 
2936 	accel_crypto_key_destroy_unsafe(key);
2937 
2938 	return 0;
2939 }
2940 
2941 struct spdk_accel_crypto_key *
2942 spdk_accel_crypto_key_get(const char *name)
2943 {
2944 	struct spdk_accel_crypto_key *key;
2945 
2946 	spdk_spin_lock(&g_keyring_spin);
2947 	key = _accel_crypto_key_get(name);
2948 	spdk_spin_unlock(&g_keyring_spin);
2949 
2950 	return key;
2951 }
2952 
2953 /* Helper function called when accel modules register with the framework. */
2954 void
2955 spdk_accel_module_list_add(struct spdk_accel_module_if *accel_module)
2956 {
2957 	struct spdk_accel_module_if *tmp;
2958 
2959 	if (_module_find_by_name(accel_module->name)) {
2960 		SPDK_NOTICELOG("Module %s already registered\n", accel_module->name);
2961 		assert(false);
2962 		return;
2963 	}
2964 
2965 	TAILQ_FOREACH(tmp, &spdk_accel_module_list, tailq) {
2966 		if (accel_module->priority < tmp->priority) {
2967 			break;
2968 		}
2969 	}
2970 
2971 	if (tmp != NULL) {
2972 		TAILQ_INSERT_BEFORE(tmp, accel_module, tailq);
2973 	} else {
2974 		TAILQ_INSERT_TAIL(&spdk_accel_module_list, accel_module, tailq);
2975 	}
2976 }
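/*
 * Modules typically register themselves at load time through a constructor,
 * e.g. (sketch; `g_my_module` stands for a fully populated
 * struct spdk_accel_module_if):
 *
 *	SPDK_ACCEL_MODULE_REGISTER(my_module, &g_my_module)
 */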
2977 
2978 /* Framework level channel create callback. */
2979 static int
2980 accel_create_channel(void *io_device, void *ctx_buf)
2981 {
2982 	struct accel_io_channel	*accel_ch = ctx_buf;
2983 	struct spdk_accel_task *accel_task;
2984 	struct spdk_accel_task_aux_data *accel_task_aux;
2985 	struct spdk_accel_sequence *seq;
2986 	struct accel_buffer *buf;
2987 	size_t task_size_aligned;
2988 	uint8_t *task_mem;
2989 	uint32_t i = 0, j;
2990 	int rc;
2991 
2992 	task_size_aligned = SPDK_ALIGN_CEIL(g_max_accel_module_size, SPDK_CACHE_LINE_SIZE);
2993 	accel_ch->task_pool_base = aligned_alloc(SPDK_CACHE_LINE_SIZE,
2994 				   g_opts.task_count * task_size_aligned);
2995 	if (!accel_ch->task_pool_base) {
2996 		return -ENOMEM;
2997 	}
2998 	memset(accel_ch->task_pool_base, 0, g_opts.task_count * task_size_aligned);
2999 
3000 	accel_ch->seq_pool_base = aligned_alloc(SPDK_CACHE_LINE_SIZE,
3001 						g_opts.sequence_count * sizeof(struct spdk_accel_sequence));
3002 	if (accel_ch->seq_pool_base == NULL) {
3003 		goto err;
3004 	}
3005 	memset(accel_ch->seq_pool_base, 0, g_opts.sequence_count * sizeof(struct spdk_accel_sequence));
3006 
3007 	accel_ch->task_aux_data_base = calloc(g_opts.task_count, sizeof(struct spdk_accel_task_aux_data));
3008 	if (accel_ch->task_aux_data_base == NULL) {
3009 		goto err;
3010 	}
3011 
3012 	accel_ch->buf_pool_base = calloc(g_opts.buf_count, sizeof(struct accel_buffer));
3013 	if (accel_ch->buf_pool_base == NULL) {
3014 		goto err;
3015 	}
3016 
3017 	STAILQ_INIT(&accel_ch->task_pool);
3018 	SLIST_INIT(&accel_ch->task_aux_data_pool);
3019 	SLIST_INIT(&accel_ch->seq_pool);
3020 	SLIST_INIT(&accel_ch->buf_pool);
3021 
3022 	task_mem = accel_ch->task_pool_base;
3023 	for (i = 0; i < g_opts.task_count; i++) {
3024 		accel_task = (struct spdk_accel_task *)task_mem;
3025 		accel_task->aux = NULL;
3026 		STAILQ_INSERT_TAIL(&accel_ch->task_pool, accel_task, link);
3027 		task_mem += task_size_aligned;
3028 		accel_task_aux = &accel_ch->task_aux_data_base[i];
3029 		SLIST_INSERT_HEAD(&accel_ch->task_aux_data_pool, accel_task_aux, link);
3030 	}
3031 	for (i = 0; i < g_opts.sequence_count; i++) {
3032 		seq = &accel_ch->seq_pool_base[i];
3033 		SLIST_INSERT_HEAD(&accel_ch->seq_pool, seq, link);
3034 	}
3035 	for (i = 0; i < g_opts.buf_count; i++) {
3036 		buf = &accel_ch->buf_pool_base[i];
3037 		SLIST_INSERT_HEAD(&accel_ch->buf_pool, buf, link);
3038 	}
3039 
3040 	/* Assign modules and get IO channels for each */
3041 	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
3042 		accel_ch->module_ch[i] = g_modules_opc[i].module->get_io_channel();
3043 		/* This can happen if idxd runs out of channels. */
3044 		if (accel_ch->module_ch[i] == NULL) {
3045 			SPDK_ERRLOG("Module %s failed to get io channel\n", g_modules_opc[i].module->name);
3046 			goto err;
3047 		}
3048 	}
3049 
3050 	if (g_accel_driver != NULL) {
3051 		accel_ch->driver_channel = g_accel_driver->get_io_channel();
3052 		if (accel_ch->driver_channel == NULL) {
3053 			SPDK_ERRLOG("Failed to get driver's IO channel\n");
3054 			goto err;
3055 		}
3056 	}
3057 
3058 	rc = spdk_iobuf_channel_init(&accel_ch->iobuf, "accel", g_opts.small_cache_size,
3059 				     g_opts.large_cache_size);
3060 	if (rc != 0) {
3061 		SPDK_ERRLOG("Failed to initialize iobuf accel channel\n");
3062 		goto err;
3063 	}
3064 
3065 	return 0;
3066 err:
3067 	if (accel_ch->driver_channel != NULL) {
3068 		spdk_put_io_channel(accel_ch->driver_channel);
3069 	}
3070 	for (j = 0; j < i; j++) {
3071 		spdk_put_io_channel(accel_ch->module_ch[j]);
3072 	}
3073 	free(accel_ch->task_pool_base);
3074 	free(accel_ch->task_aux_data_base);
3075 	free(accel_ch->seq_pool_base);
3076 	free(accel_ch->buf_pool_base);
3077 
3078 	return -ENOMEM;
3079 }
3080 
3081 static void
3082 accel_add_stats(struct accel_stats *total, struct accel_stats *stats)
3083 {
3084 	int i;
3085 
3086 	total->sequence_executed += stats->sequence_executed;
3087 	total->sequence_failed += stats->sequence_failed;
3088 	total->sequence_outstanding += stats->sequence_outstanding;
3089 	total->task_outstanding += stats->task_outstanding;
3090 	total->retry.task += stats->retry.task;
3091 	total->retry.sequence += stats->retry.sequence;
3092 	total->retry.iobuf += stats->retry.iobuf;
3093 	total->retry.bufdesc += stats->retry.bufdesc;
3094 	for (i = 0; i < SPDK_ACCEL_OPC_LAST; ++i) {
3095 		total->operations[i].executed += stats->operations[i].executed;
3096 		total->operations[i].failed += stats->operations[i].failed;
3097 		total->operations[i].num_bytes += stats->operations[i].num_bytes;
3098 	}
3099 }
3100 
3101 /* Framework level channel destroy callback. */
3102 static void
3103 accel_destroy_channel(void *io_device, void *ctx_buf)
3104 {
3105 	struct accel_io_channel	*accel_ch = ctx_buf;
3106 	int i;
3107 
3108 	spdk_iobuf_channel_fini(&accel_ch->iobuf);
3109 
3110 	if (accel_ch->driver_channel != NULL) {
3111 		spdk_put_io_channel(accel_ch->driver_channel);
3112 	}
3113 
3114 	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
3115 		assert(accel_ch->module_ch[i] != NULL);
3116 		spdk_put_io_channel(accel_ch->module_ch[i]);
3117 		accel_ch->module_ch[i] = NULL;
3118 	}
3119 
3120 	/* Update global stats to make sure the channel's stats aren't lost after the channel is gone */
3121 	spdk_spin_lock(&g_stats_lock);
3122 	accel_add_stats(&g_stats, &accel_ch->stats);
3123 	spdk_spin_unlock(&g_stats_lock);
3124 
3125 	free(accel_ch->task_pool_base);
3126 	free(accel_ch->task_aux_data_base);
3127 	free(accel_ch->seq_pool_base);
3128 	free(accel_ch->buf_pool_base);
3129 }
3130 
3131 struct spdk_io_channel *
3132 spdk_accel_get_io_channel(void)
3133 {
3134 	return spdk_get_io_channel(&spdk_accel_module_list);
3135 }
3136 
3137 static int
3138 accel_module_initialize(void)
3139 {
3140 	struct spdk_accel_module_if *accel_module, *tmp_module;
3141 	int rc = 0, module_rc;
3142 
3143 	TAILQ_FOREACH_SAFE(accel_module, &spdk_accel_module_list, tailq, tmp_module) {
3144 		module_rc = accel_module->module_init();
3145 		if (module_rc) {
3146 			TAILQ_REMOVE(&spdk_accel_module_list, accel_module, tailq);
3147 			if (module_rc == -ENODEV) {
3148 				SPDK_NOTICELOG("No devices for module %s, skipping\n", accel_module->name);
3149 			} else if (!rc) {
3150 				SPDK_ERRLOG("Module %s initialization failed with %d\n", accel_module->name, module_rc);
3151 				rc = module_rc;
3152 			}
3153 			continue;
3154 		}
3155 
3156 		SPDK_DEBUGLOG(accel, "Module %s initialized.\n", accel_module->name);
3157 	}
3158 
3159 	return rc;
3160 }
3161 
3162 static void
3163 accel_module_init_opcode(enum spdk_accel_opcode opcode)
3164 {
3165 	struct accel_module *module = &g_modules_opc[opcode];
3166 	struct spdk_accel_module_if *module_if = module->module;
3167 
3168 	if (module_if->get_memory_domains != NULL) {
3169 		module->supports_memory_domains = module_if->get_memory_domains(NULL, 0) > 0;
3170 	}
3171 }
3172 
3173 static int
3174 accel_memory_domain_translate(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
3175 			      struct spdk_memory_domain *dst_domain, struct spdk_memory_domain_translation_ctx *dst_domain_ctx,
3176 			      void *addr, size_t len, struct spdk_memory_domain_translation_result *result)
3177 {
3178 	struct accel_buffer *buf = src_domain_ctx;
3179 
3180 	SPDK_DEBUGLOG(accel, "translate addr %p, len %zu\n", addr, len);
3181 
3182 	assert(g_accel_domain == src_domain);
3183 	assert(spdk_memory_domain_get_system_domain() == dst_domain);
3184 	assert(buf->buf == NULL);
3185 	assert(addr == ACCEL_BUFFER_BASE);
3186 	assert(len == buf->len);
3187 
3188 	buf->buf = spdk_iobuf_get(&buf->ch->iobuf, buf->len, NULL, NULL);
3189 	if (spdk_unlikely(buf->buf == NULL)) {
3190 		return -ENOMEM;
3191 	}
3192 
3193 	result->iov_count = 1;
3194 	result->iov.iov_base = buf->buf;
3195 	result->iov.iov_len = buf->len;
3196 	SPDK_DEBUGLOG(accel, "translated addr %p\n", result->iov.iov_base);
3197 	return 0;
3198 }
3199 
3200 static void
3201 accel_memory_domain_invalidate(struct spdk_memory_domain *domain, void *domain_ctx,
3202 			       struct iovec *iov, uint32_t iovcnt)
3203 {
3204 	struct accel_buffer *buf = domain_ctx;
3205 
3206 	SPDK_DEBUGLOG(accel, "invalidate addr %p, len %zu\n", iov[0].iov_base, iov[0].iov_len);
3207 
3208 	assert(g_accel_domain == domain);
3209 	assert(iovcnt == 1);
3210 	assert(buf->buf != NULL);
3211 	assert(iov[0].iov_base == buf->buf);
3212 	assert(iov[0].iov_len == buf->len);
3213 
3214 	spdk_iobuf_put(&buf->ch->iobuf, buf->buf, buf->len);
3215 	buf->buf = NULL;
3216 }
3217 
3218 int
3219 spdk_accel_initialize(void)
3220 {
3221 	enum spdk_accel_opcode op;
3222 	struct spdk_accel_module_if *accel_module = NULL;
3223 	int rc;
3224 
3225 	/*
3226 	 * We need a unique identifier for the accel framework, so use the
3227 	 * spdk_accel_module_list address for this purpose.
3228 	 */
3229 	spdk_io_device_register(&spdk_accel_module_list, accel_create_channel, accel_destroy_channel,
3230 				sizeof(struct accel_io_channel), "accel");
3231 
3232 	spdk_spin_init(&g_keyring_spin);
3233 	spdk_spin_init(&g_stats_lock);
3234 
3235 	rc = spdk_memory_domain_create(&g_accel_domain, SPDK_DMA_DEVICE_TYPE_ACCEL, NULL,
3236 				       "SPDK_ACCEL_DMA_DEVICE");
3237 	if (rc != 0) {
3238 		SPDK_ERRLOG("Failed to create accel memory domain\n");
3239 		return rc;
3240 	}
3241 
3242 	spdk_memory_domain_set_translation(g_accel_domain, accel_memory_domain_translate);
3243 	spdk_memory_domain_set_invalidate(g_accel_domain, accel_memory_domain_invalidate);
3244 
3245 	g_modules_started = true;
3246 	rc = accel_module_initialize();
3247 	if (rc) {
3248 		return rc;
3249 	}
3250 
3251 	if (g_accel_driver != NULL && g_accel_driver->init != NULL) {
3252 		rc = g_accel_driver->init();
3253 		if (rc != 0) {
3254 			SPDK_ERRLOG("Failed to initialize driver %s: %s\n", g_accel_driver->name,
3255 				    spdk_strerror(-rc));
3256 			return rc;
3257 		}
3258 	}
3259 
3260 	/* The module list is ordered by priority, with the highest priority modules being at the end
3261 	 * of the list.  The software module should be somewhere at the beginning of the list,
3262 	 * before all HW modules.
3263 	 * NOTE: all opcodes must be supported by software in the event that no HW modules are
3264 	 * initialized to support the operation.
3265 	 */
3266 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
3267 		for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
3268 			if (accel_module->supports_opcode(op)) {
3269 				g_modules_opc[op].module = accel_module;
3270 				SPDK_DEBUGLOG(accel, "OPC 0x%x now assigned to %s\n", op, accel_module->name);
3271 			}
3272 		}
3273 
3274 		if (accel_module->get_ctx_size != NULL) {
3275 			g_max_accel_module_size = spdk_max(g_max_accel_module_size,
3276 							   accel_module->get_ctx_size());
3277 		}
3278 	}
3279 
3280 	/* Now let's check for overrides and apply all that exist */
3281 	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
3282 		if (g_modules_opc_override[op] != NULL) {
3283 			accel_module = _module_find_by_name(g_modules_opc_override[op]);
3284 			if (accel_module == NULL) {
3285 				SPDK_ERRLOG("Invalid module name of %s\n", g_modules_opc_override[op]);
3286 				return -EINVAL;
3287 			}
3288 			if (accel_module->supports_opcode(op) == false) {
3289 				SPDK_ERRLOG("Module %s does not support op code %d\n", accel_module->name, op);
3290 				return -EINVAL;
3291 			}
3292 			g_modules_opc[op].module = accel_module;
3293 		}
3294 	}
3295 
3296 	if (g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module != g_modules_opc[SPDK_ACCEL_OPC_DECRYPT].module) {
3297 		SPDK_ERRLOG("Different accel modules are assigned to encrypt and decrypt operations");
3298 		return -EINVAL;
3299 	}
3300 	if (g_modules_opc[SPDK_ACCEL_OPC_COMPRESS].module !=
3301 	    g_modules_opc[SPDK_ACCEL_OPC_DECOMPRESS].module) {
3302 		SPDK_ERRLOG("Different accel modules are assigned to compress and decompress operations");
3303 		return -EINVAL;
3304 	}
3305 
3306 	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
3307 		assert(g_modules_opc[op].module != NULL);
3308 		accel_module_init_opcode(op);
3309 	}
3310 
3311 	rc = spdk_iobuf_register_module("accel");
3312 	if (rc != 0) {
3313 		SPDK_ERRLOG("Failed to register accel iobuf module\n");
3314 		return rc;
3315 	}
3316 
3317 	return 0;
3318 }
3319 
3320 static void
3321 accel_module_finish_cb(void)
3322 {
3323 	spdk_accel_fini_cb cb_fn = g_fini_cb_fn;
3324 
3325 	cb_fn(g_fini_cb_arg);
3326 	g_fini_cb_fn = NULL;
3327 	g_fini_cb_arg = NULL;
3328 }
3329 
3330 static void
3331 accel_write_overridden_opc(struct spdk_json_write_ctx *w, const char *opc_str,
3332 			   const char *module_str)
3333 {
3334 	spdk_json_write_object_begin(w);
3335 	spdk_json_write_named_string(w, "method", "accel_assign_opc");
3336 	spdk_json_write_named_object_begin(w, "params");
3337 	spdk_json_write_named_string(w, "opname", opc_str);
3338 	spdk_json_write_named_string(w, "module", module_str);
3339 	spdk_json_write_object_end(w);
3340 	spdk_json_write_object_end(w);
3341 }
3342 
3343 static void
3344 __accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
3345 {
3346 	spdk_json_write_named_string(w, "name", key->param.key_name);
3347 	spdk_json_write_named_string(w, "cipher", key->param.cipher);
3348 	spdk_json_write_named_string(w, "key", key->param.hex_key);
3349 	if (key->param.hex_key2) {
3350 		spdk_json_write_named_string(w, "key2", key->param.hex_key2);
3351 	}
3352 
3353 	if (key->param.tweak_mode) {
3354 		spdk_json_write_named_string(w, "tweak_mode", key->param.tweak_mode);
3355 	}
3356 }
3357 
3358 void
3359 _accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
3360 {
3361 	spdk_json_write_object_begin(w);
3362 	__accel_crypto_key_dump_param(w, key);
3363 	spdk_json_write_object_end(w);
3364 }
3365 
3366 static void
3367 _accel_crypto_key_write_config_json(struct spdk_json_write_ctx *w,
3368 				    struct spdk_accel_crypto_key *key)
3369 {
3370 	spdk_json_write_object_begin(w);
3371 	spdk_json_write_named_string(w, "method", "accel_crypto_key_create");
3372 	spdk_json_write_named_object_begin(w, "params");
3373 	__accel_crypto_key_dump_param(w, key);
3374 	spdk_json_write_object_end(w);
3375 	spdk_json_write_object_end(w);
3376 }
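
/* A full config dump replays key creation through the RPC above; a sketch of
 * the emitted shape (the name, cipher, and hex strings are illustrative):
 *
 *   {"method": "accel_crypto_key_create",
 *    "params": {"name": "key0", "cipher": "AES_XTS",
 *               "key": "00112233...", "key2": "44556677..."}}
 */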
3377 
3378 static void
3379 accel_write_options(struct spdk_json_write_ctx *w)
3380 {
3381 	spdk_json_write_object_begin(w);
3382 	spdk_json_write_named_string(w, "method", "accel_set_options");
3383 	spdk_json_write_named_object_begin(w, "params");
3384 	spdk_json_write_named_uint32(w, "small_cache_size", g_opts.small_cache_size);
3385 	spdk_json_write_named_uint32(w, "large_cache_size", g_opts.large_cache_size);
3386 	spdk_json_write_named_uint32(w, "task_count", g_opts.task_count);
3387 	spdk_json_write_named_uint32(w, "sequence_count", g_opts.sequence_count);
3388 	spdk_json_write_named_uint32(w, "buf_count", g_opts.buf_count);
3389 	spdk_json_write_object_end(w);
3390 	spdk_json_write_object_end(w);
3391 }
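
/* The emitted object mirrors the accel_set_options RPC; with the compiled-in
 * defaults it looks like this (values shown are the defaults, not fixed):
 *
 *   {"method": "accel_set_options",
 *    "params": {"small_cache_size": 128, "large_cache_size": 16,
 *               "task_count": 2048, "sequence_count": 2048,
 *               "buf_count": 2048}}
 */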
3392 
3393 static void
3394 _accel_crypto_keys_write_config_json(struct spdk_json_write_ctx *w, bool full_dump)
3395 {
3396 	struct spdk_accel_crypto_key *key;
3397 
3398 	spdk_spin_lock(&g_keyring_spin);
3399 	TAILQ_FOREACH(key, &g_keyring, link) {
3400 		if (full_dump) {
3401 			_accel_crypto_key_write_config_json(w, key);
3402 		} else {
3403 			_accel_crypto_key_dump_param(w, key);
3404 		}
3405 	}
3406 	spdk_spin_unlock(&g_keyring_spin);
3407 }
3408 
3409 void
3410 _accel_crypto_keys_dump_param(struct spdk_json_write_ctx *w)
3411 {
3412 	_accel_crypto_keys_write_config_json(w, false);
3413 }
3414 
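/* Emits the entire accel configuration as an array of RPC invocations: global
 * options first, then the selected driver (if any), each module's own config,
 * any per-opcode overrides, and finally the crypto keys. */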
3415 void
3416 spdk_accel_write_config_json(struct spdk_json_write_ctx *w)
3417 {
3418 	struct spdk_accel_module_if *accel_module;
3419 	int i;
3420 
3421 	spdk_json_write_array_begin(w);
3422 	accel_write_options(w);
3423 
3424 	if (g_accel_driver != NULL) {
3425 		spdk_json_write_object_begin(w);
3426 		spdk_json_write_named_string(w, "method", "accel_set_driver");
3427 		spdk_json_write_named_object_begin(w, "params");
3428 		spdk_json_write_named_string(w, "name", g_accel_driver->name);
3429 		spdk_json_write_object_end(w);
3430 		spdk_json_write_object_end(w);
3431 	}
3432 
3433 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
3434 		if (accel_module->write_config_json) {
3435 			accel_module->write_config_json(w);
3436 		}
3437 	}
3438 	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
3439 		if (g_modules_opc_override[i]) {
3440 			accel_write_overridden_opc(w, g_opcode_strings[i], g_modules_opc_override[i]);
3441 		}
3442 	}
3443 
3444 	_accel_crypto_keys_write_config_json(w, true);
3445 
3446 	spdk_json_write_array_end(w);
3447 }
3448 
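/* Walks the module list one entry per call: modules whose module_fini is
 * asynchronous are expected to call spdk_accel_module_finish() again once
 * they are done, which advances the walk.  When the list is exhausted, the
 * driver and global resources are torn down and the fini callback fires. */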
3449 void
3450 spdk_accel_module_finish(void)
3451 {
3452 	if (!g_accel_module) {
3453 		g_accel_module = TAILQ_FIRST(&spdk_accel_module_list);
3454 	} else {
3455 		g_accel_module = TAILQ_NEXT(g_accel_module, tailq);
3456 	}
3457 
3458 	if (!g_accel_module) {
3459 		if (g_accel_driver != NULL && g_accel_driver->fini != NULL) {
3460 			g_accel_driver->fini();
3461 		}
3462 
3463 		spdk_spin_destroy(&g_keyring_spin);
3464 		spdk_spin_destroy(&g_stats_lock);
3465 		if (g_accel_domain) {
3466 			spdk_memory_domain_destroy(g_accel_domain);
3467 			g_accel_domain = NULL;
3468 		}
3469 		accel_module_finish_cb();
3470 		return;
3471 	}
3472 
3473 	if (g_accel_module->module_fini) {
3474 		spdk_thread_send_msg(spdk_get_thread(), g_accel_module->module_fini, NULL);
3475 	} else {
3476 		spdk_accel_module_finish();
3477 	}
3478 }
3479 
3480 static void
3481 accel_io_device_unregister_cb(void *io_device)
3482 {
3483 	struct spdk_accel_crypto_key *key, *key_tmp;
3484 	enum spdk_accel_opcode op;
3485 
3486 	spdk_spin_lock(&g_keyring_spin);
3487 	TAILQ_FOREACH_SAFE(key, &g_keyring, link, key_tmp) {
3488 		accel_crypto_key_destroy_unsafe(key);
3489 	}
3490 	spdk_spin_unlock(&g_keyring_spin);
3491 
3492 	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
3493 		if (g_modules_opc_override[op] != NULL) {
3494 			free(g_modules_opc_override[op]);
3495 			g_modules_opc_override[op] = NULL;
3496 		}
3497 		g_modules_opc[op].module = NULL;
3498 	}
3499 
3500 	spdk_accel_module_finish();
3501 }
3502 
3503 void
3504 spdk_accel_finish(spdk_accel_fini_cb cb_fn, void *cb_arg)
3505 {
3506 	assert(cb_fn != NULL);
3507 
3508 	g_fini_cb_fn = cb_fn;
3509 	g_fini_cb_arg = cb_arg;
3510 
3511 	spdk_io_device_unregister(&spdk_accel_module_list, accel_io_device_unregister_cb);
3512 }
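
/* Usage sketch (illustrative callback name): called once at shutdown, after
 * all accel channels have been released:
 *
 *   static void
 *   accel_fini_done(void *cb_arg)
 *   {
 *           // all modules and framework resources are now torn down
 *   }
 *
 *   spdk_accel_finish(accel_fini_done, NULL);
 */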
3513 
3514 static struct spdk_accel_driver *
3515 accel_find_driver(const char *name)
3516 {
3517 	struct spdk_accel_driver *driver;
3518 
3519 	TAILQ_FOREACH(driver, &g_accel_drivers, tailq) {
3520 		if (strcmp(driver->name, name) == 0) {
3521 			return driver;
3522 		}
3523 	}
3524 
3525 	return NULL;
3526 }
3527 
3528 int
3529 spdk_accel_set_driver(const char *name)
3530 {
3531 	struct spdk_accel_driver *driver = NULL;
3532 
3533 	if (name != NULL && name[0] != '\0') {
3534 		driver = accel_find_driver(name);
3535 		if (driver == NULL) {
3536 			SPDK_ERRLOG("Couldn't find driver named '%s'\n", name);
3537 			return -ENODEV;
3538 		}
3539 	}
3540 
3541 	g_accel_driver = driver;
3542 
3543 	return 0;
3544 }
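
/* Example (hypothetical driver name): pick the driver that will execute
 * operation sequences, or pass NULL (or "") to clear the selection:
 *
 *   if (spdk_accel_set_driver("my_driver") != 0) {
 *           // no driver with that name has been registered
 *   }
 */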
3545 
3546 const char *
3547 spdk_accel_get_driver_name(void)
3548 {
3549 	if (!g_accel_driver) {
3550 		return NULL;
3551 	}
3552 
3553 	return g_accel_driver->name;
3554 }
3555 
3556 void
3557 spdk_accel_driver_register(struct spdk_accel_driver *driver)
3558 {
3559 	if (accel_find_driver(driver->name)) {
3560 		SPDK_ERRLOG("Driver named '%s' has already been registered\n", driver->name);
3561 		assert(0);
3562 		return;
3563 	}
3564 
3565 	TAILQ_INSERT_TAIL(&g_accel_drivers, driver, tailq);
3566 }
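
/* A minimal registration sketch (the driver name and callbacks are
 * hypothetical; drivers typically register from a constructor at load time,
 * e.g. via the SPDK_ACCEL_DRIVER_REGISTER() helper in accel_module.h):
 *
 *   static struct spdk_accel_driver g_my_driver = {
 *           .name = "my_driver",
 *           .execute_sequence = my_execute_sequence,
 *           .get_io_channel = my_get_io_channel,
 *   };
 *
 *   SPDK_ACCEL_DRIVER_REGISTER(my_driver, &g_my_driver)
 */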
3567 
3568 int
3569 spdk_accel_set_opts(const struct spdk_accel_opts *opts)
3570 {
3571 	if (!opts) {
3572 		SPDK_ERRLOG("opts cannot be NULL\n");
3573 		return -EINVAL;
3574 	}
3575 
3576 	if (!opts->opts_size) {
3577 		SPDK_ERRLOG("opts_size inside opts cannot be zero\n");
3578 		return -EINVAL;
3579 	}
3580 
3581 	if (SPDK_GET_FIELD(opts, task_count, g_opts.task_count,
3582 			   opts->opts_size) < ACCEL_TASKS_IN_SEQUENCE_LIMIT) {
3583 		return -EINVAL;
3584 	}
3585 
3586 #define SET_FIELD(field) \
3587 	if (offsetof(struct spdk_accel_opts, field) + sizeof(opts->field) <= opts->opts_size) { \
3588 		g_opts.field = opts->field; \
3589 	} \
3590 
3591 	SET_FIELD(small_cache_size);
3592 	SET_FIELD(large_cache_size);
3593 	SET_FIELD(task_count);
3594 	SET_FIELD(sequence_count);
3595 	SET_FIELD(buf_count);
3596 
3597 	g_opts.opts_size = opts->opts_size;
3598 
3599 #undef SET_FIELD
3600 
3601 	return 0;
3602 }
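
/* A minimal read-modify-write sketch for adjusting a single option (the new
 * task_count value is arbitrary); spdk_accel_get_opts() fills in opts_size,
 * which spdk_accel_set_opts() requires:
 *
 *   struct spdk_accel_opts opts = {};
 *
 *   spdk_accel_get_opts(&opts, sizeof(opts));
 *   opts.task_count = 4096;
 *   if (spdk_accel_set_opts(&opts) != 0) {
 *           // rejected; e.g. task_count below ACCEL_TASKS_IN_SEQUENCE_LIMIT
 *   }
 */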
3603 
3604 void
3605 spdk_accel_get_opts(struct spdk_accel_opts *opts, size_t opts_size)
3606 {
3607 	if (!opts) {
3608 		SPDK_ERRLOG("opts should not be NULL\n");
3609 		return;
3610 	}
3611 
3612 	if (!opts_size) {
3613 		SPDK_ERRLOG("opts_size should not be zero\n");
3614 		return;
3615 	}
3616 
3617 	opts->opts_size = opts_size;
3618 
3619 #define SET_FIELD(field) \
3620 	if (offsetof(struct spdk_accel_opts, field) + sizeof(opts->field) <= opts_size) { \
3621 		opts->field = g_opts.field; \
3622 	} \
3623 
3624 	SET_FIELD(small_cache_size);
3625 	SET_FIELD(large_cache_size);
3626 	SET_FIELD(task_count);
3627 	SET_FIELD(sequence_count);
3628 	SET_FIELD(buf_count);
3629 
3630 #undef SET_FIELD
3631 
3632 	/* Do not remove this statement. Always update it when adding a new field,
3633 	 * and do not forget to add a SET_FIELD statement for the new field. */
3634 	SPDK_STATIC_ASSERT(sizeof(struct spdk_accel_opts) == 28, "Incorrect size");
3635 }
3636 
3637 struct accel_get_stats_ctx {
3638 	struct accel_stats	stats;
3639 	accel_get_stats_cb	cb_fn;
3640 	void			*cb_arg;
3641 };
3642 
3643 static void
3644 accel_get_channel_stats_done(struct spdk_io_channel_iter *iter, int status)
3645 {
3646 	struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);
3647 
3648 	ctx->cb_fn(&ctx->stats, ctx->cb_arg);
3649 	free(ctx);
3650 }
3651 
3652 static void
3653 accel_get_channel_stats(struct spdk_io_channel_iter *iter)
3654 {
3655 	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(iter);
3656 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
3657 	struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);
3658 
3659 	accel_add_stats(&ctx->stats, &accel_ch->stats);
3660 	spdk_for_each_channel_continue(iter, 0);
3661 }
3662 
3663 int
3664 accel_get_stats(accel_get_stats_cb cb_fn, void *cb_arg)
3665 {
3666 	struct accel_get_stats_ctx *ctx;
3667 
3668 	ctx = calloc(1, sizeof(*ctx));
3669 	if (ctx == NULL) {
3670 		return -ENOMEM;
3671 	}
3672 
3673 	spdk_spin_lock(&g_stats_lock);
3674 	accel_add_stats(&ctx->stats, &g_stats);
3675 	spdk_spin_unlock(&g_stats_lock);
3676 
3677 	ctx->cb_fn = cb_fn;
3678 	ctx->cb_arg = cb_arg;
3679 
3680 	spdk_for_each_channel(&spdk_accel_module_list, accel_get_channel_stats, ctx,
3681 			      accel_get_channel_stats_done);
3682 
3683 	return 0;
3684 }
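
/* Usage sketch (illustrative callback name): the totals handed to the
 * callback combine g_stats with every channel's counters and are only valid
 * for the duration of the call:
 *
 *   static void
 *   stats_done(struct accel_stats *stats, void *cb_arg)
 *   {
 *           // inspect or copy *stats here
 *   }
 *
 *   accel_get_stats(stats_done, NULL);
 */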
3685 
3686 void
3687 spdk_accel_get_opcode_stats(struct spdk_io_channel *ch, enum spdk_accel_opcode opcode,
3688 			    struct spdk_accel_opcode_stats *stats, size_t size)
3689 {
3690 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
3691 
3692 #define FIELD_OK(field) \
3693 	offsetof(struct spdk_accel_opcode_stats, field) + sizeof(stats->field) <= size
3694 
3695 #define SET_FIELD(field, value) \
3696 	if (FIELD_OK(field)) { \
3697 		stats->field = value; \
3698 	}
3699 
3700 	SET_FIELD(executed, accel_ch->stats.operations[opcode].executed);
3701 	SET_FIELD(failed, accel_ch->stats.operations[opcode].failed);
3702 	SET_FIELD(num_bytes, accel_ch->stats.operations[opcode].num_bytes);
3703 
3704 #undef FIELD_OK
3705 #undef SET_FIELD
3706 }
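
/* Example (sketch, assuming <stdio.h> and <inttypes.h>): read the
 * per-channel crc32c counters; passing sizeof(st) lets FIELD_OK skip members
 * the caller's struct is too old to hold:
 *
 *   struct spdk_accel_opcode_stats st = {};
 *
 *   spdk_accel_get_opcode_stats(ch, SPDK_ACCEL_OPC_CRC32C, &st, sizeof(st));
 *   printf("crc32c: executed=%" PRIu64 ", failed=%" PRIu64 "\n",
 *          st.executed, st.failed);
 */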
3707 
3708 uint8_t
3709 spdk_accel_get_buf_align(enum spdk_accel_opcode opcode,
3710 			 const struct spdk_accel_operation_exec_ctx *ctx)
3711 {
3712 	struct spdk_accel_module_if *module = g_modules_opc[opcode].module;
3713 	struct spdk_accel_opcode_info modinfo = {}, drvinfo = {};
3714 
3715 	if (g_accel_driver != NULL && g_accel_driver->get_operation_info != NULL) {
3716 		g_accel_driver->get_operation_info(opcode, ctx, &drvinfo);
3717 	}
3718 
3719 	if (module->get_operation_info != NULL) {
3720 		module->get_operation_info(opcode, ctx, &modinfo);
3721 	}
3722 
3723 	/* If a driver is set, it'll execute most of the operations, while the rest will usually
3724 	 * fall back to accel_sw, which doesn't have any alignment requirements.  However, to be
3725 	 * extra safe, return the max(driver, module) if a driver delegates some operations to a
3726 	 * hardware module. */
3727 	return spdk_max(modinfo.required_alignment, drvinfo.required_alignment);
3728 }
3729 
3730 struct spdk_accel_module_if *
3731 spdk_accel_get_module(const char *name)
3732 {
3733 	struct spdk_accel_module_if *module;
3734 
3735 	TAILQ_FOREACH(module, &spdk_accel_module_list, tailq) {
3736 		if (strcmp(module->name, name) == 0) {
3737 			return module;
3738 		}
3739 	}
3740 
3741 	return NULL;
3742 }
3743 
3744 int
3745 spdk_accel_get_opc_memory_domains(enum spdk_accel_opcode opcode,
3746 				  struct spdk_memory_domain **domains,
3747 				  int array_size)
3748 {
3749 	assert(opcode < SPDK_ACCEL_OPC_LAST);
3750 
3751 	if (g_modules_opc[opcode].module->get_memory_domains) {
3752 		return g_modules_opc[opcode].module->get_memory_domains(domains, array_size);
3753 	}
3754 
3755 	return 0;
3756 }
3757 
3758 SPDK_LOG_REGISTER_COMPONENT(accel)
3759