xref: /spdk/lib/accel/accel.c (revision 66289a6dbe28217365daa40fd92dcf327871c2e8)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2020 Intel Corporation.
3  *   Copyright (c) 2022, 2023 NVIDIA CORPORATION & AFFILIATES.
4  *   All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "spdk/accel_module.h"
10 
11 #include "accel_internal.h"
12 
13 #include "spdk/dma.h"
14 #include "spdk/env.h"
15 #include "spdk/likely.h"
16 #include "spdk/log.h"
17 #include "spdk/thread.h"
18 #include "spdk/json.h"
19 #include "spdk/crc32.h"
20 #include "spdk/util.h"
21 #include "spdk/hexlify.h"
22 #include "spdk/string.h"
23 
24 /* Accelerator Framework: The following provides a top level
25  * generic API for the accelerator functions defined here. Modules,
26  * such as the one in /module/accel/ioat, supply the implementation
27  * with the exception of the pure software implementation contained
28  * later in this file.
29  */
30 
31 #define ALIGN_4K			0x1000
32 #define ACCEL_TASKS_PER_CHANNEL		2048
33 #define ACCEL_SMALL_CACHE_SIZE		128
34 #define ACCEL_LARGE_CACHE_SIZE		16
35 /* Set MSB, so we don't return NULL pointers as buffers */
36 #define ACCEL_BUFFER_BASE		((void *)(1ull << 63))
37 #define ACCEL_BUFFER_OFFSET_MASK	((uintptr_t)ACCEL_BUFFER_BASE - 1)
38 
39 #define ACCEL_CRYPTO_TWEAK_MODE_DEFAULT	SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA
40 #define ACCEL_TASKS_IN_SEQUENCE_LIMIT	8
41 
42 struct accel_module {
43 	struct spdk_accel_module_if	*module;
44 	bool				supports_memory_domains;
45 };
46 
47 /* Largest context size for all accel modules */
48 static size_t g_max_accel_module_size = sizeof(struct spdk_accel_task);
49 
50 static struct spdk_accel_module_if *g_accel_module = NULL;
51 static spdk_accel_fini_cb g_fini_cb_fn = NULL;
52 static void *g_fini_cb_arg = NULL;
53 static bool g_modules_started = false;
54 static struct spdk_memory_domain *g_accel_domain;
55 
56 /* Global list of registered accelerator modules */
57 static TAILQ_HEAD(, spdk_accel_module_if) spdk_accel_module_list =
58 	TAILQ_HEAD_INITIALIZER(spdk_accel_module_list);
59 
60 /* Crypto keyring */
61 static TAILQ_HEAD(, spdk_accel_crypto_key) g_keyring = TAILQ_HEAD_INITIALIZER(g_keyring);
62 static struct spdk_spinlock g_keyring_spin;
63 
64 /* Global array mapping capabilities to modules */
65 static struct accel_module g_modules_opc[SPDK_ACCEL_OPC_LAST] = {};
66 static char *g_modules_opc_override[SPDK_ACCEL_OPC_LAST] = {};
67 TAILQ_HEAD(, spdk_accel_driver) g_accel_drivers = TAILQ_HEAD_INITIALIZER(g_accel_drivers);
68 static struct spdk_accel_driver *g_accel_driver;
69 static struct spdk_accel_opts g_opts = {
70 	.small_cache_size = ACCEL_SMALL_CACHE_SIZE,
71 	.large_cache_size = ACCEL_LARGE_CACHE_SIZE,
72 	.task_count = ACCEL_TASKS_PER_CHANNEL,
73 	.sequence_count = ACCEL_TASKS_PER_CHANNEL,
74 	.buf_count = ACCEL_TASKS_PER_CHANNEL,
75 };
76 static struct accel_stats g_stats;
77 static struct spdk_spinlock g_stats_lock;
78 
79 static const char *g_opcode_strings[SPDK_ACCEL_OPC_LAST] = {
80 	"copy", "fill", "dualcast", "compare", "crc32c", "copy_crc32c",
81 	"compress", "decompress", "encrypt", "decrypt", "xor",
82 	"dif_verify", "dif_verify_copy", "dif_generate", "dif_generate_copy",
83 	"dix_generate", "dix_verify"
84 };
85 
86 enum accel_sequence_state {
87 	ACCEL_SEQUENCE_STATE_INIT,
88 	ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF,
89 	ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF,
90 	ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF,
91 	ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF,
92 	ACCEL_SEQUENCE_STATE_PULL_DATA,
93 	ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA,
94 	ACCEL_SEQUENCE_STATE_EXEC_TASK,
95 	ACCEL_SEQUENCE_STATE_AWAIT_TASK,
96 	ACCEL_SEQUENCE_STATE_COMPLETE_TASK,
97 	ACCEL_SEQUENCE_STATE_NEXT_TASK,
98 	ACCEL_SEQUENCE_STATE_PUSH_DATA,
99 	ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA,
100 	ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS,
101 	ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS,
102 	ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS,
103 	ACCEL_SEQUENCE_STATE_ERROR,
104 	ACCEL_SEQUENCE_STATE_MAX,
105 };
106 
107 static const char *g_seq_states[]
108 __attribute__((unused)) = {
109 	[ACCEL_SEQUENCE_STATE_INIT] = "init",
110 	[ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF] = "check-virtbuf",
111 	[ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF] = "await-virtbuf",
112 	[ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF] = "check-bouncebuf",
113 	[ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF] = "await-bouncebuf",
114 	[ACCEL_SEQUENCE_STATE_PULL_DATA] = "pull-data",
115 	[ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA] = "await-pull-data",
116 	[ACCEL_SEQUENCE_STATE_EXEC_TASK] = "exec-task",
117 	[ACCEL_SEQUENCE_STATE_AWAIT_TASK] = "await-task",
118 	[ACCEL_SEQUENCE_STATE_COMPLETE_TASK] = "complete-task",
119 	[ACCEL_SEQUENCE_STATE_NEXT_TASK] = "next-task",
120 	[ACCEL_SEQUENCE_STATE_PUSH_DATA] = "push-data",
121 	[ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA] = "await-push-data",
122 	[ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS] = "driver-exec-tasks",
123 	[ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS] = "driver-await-tasks",
124 	[ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS] = "driver-complete-tasks",
125 	[ACCEL_SEQUENCE_STATE_ERROR] = "error",
126 	[ACCEL_SEQUENCE_STATE_MAX] = "",
127 };
128 
129 #define ACCEL_SEQUENCE_STATE_STRING(s) \
130 	(((s) >= ACCEL_SEQUENCE_STATE_INIT && (s) < ACCEL_SEQUENCE_STATE_MAX) \
131 	 ? g_seq_states[s] : "unknown")
132 
133 struct accel_buffer {
134 	struct spdk_accel_sequence	*seq;
135 	void				*buf;
136 	uint64_t			len;
137 	struct spdk_iobuf_entry		iobuf;
138 	spdk_accel_sequence_get_buf_cb	cb_fn;
139 	void				*cb_ctx;
140 	SLIST_ENTRY(accel_buffer)	link;
141 	struct accel_io_channel		*ch;
142 };
143 
144 struct accel_io_channel {
145 	struct spdk_io_channel			*module_ch[SPDK_ACCEL_OPC_LAST];
146 	struct spdk_io_channel			*driver_channel;
147 	void					*task_pool_base;
148 	struct spdk_accel_sequence		*seq_pool_base;
149 	struct accel_buffer			*buf_pool_base;
150 	struct spdk_accel_task_aux_data		*task_aux_data_base;
151 	STAILQ_HEAD(, spdk_accel_task)		task_pool;
152 	SLIST_HEAD(, spdk_accel_task_aux_data)	task_aux_data_pool;
153 	SLIST_HEAD(, spdk_accel_sequence)	seq_pool;
154 	SLIST_HEAD(, accel_buffer)		buf_pool;
155 	struct spdk_iobuf_channel		iobuf;
156 	struct accel_stats			stats;
157 };
158 
159 TAILQ_HEAD(accel_sequence_tasks, spdk_accel_task);
160 
161 struct spdk_accel_sequence {
162 	struct accel_io_channel			*ch;
163 	struct accel_sequence_tasks		tasks;
164 	SLIST_HEAD(, accel_buffer)		bounce_bufs;
165 	int					status;
166 	/* state uses enum accel_sequence_state */
167 	uint8_t					state;
168 	bool					in_process_sequence;
169 	spdk_accel_completion_cb		cb_fn;
170 	void					*cb_arg;
171 	SLIST_ENTRY(spdk_accel_sequence)	link;
172 };
173 SPDK_STATIC_ASSERT(sizeof(struct spdk_accel_sequence) == 64, "invalid size");
174 
175 #define accel_update_stats(ch, event, v) \
176 	do { \
177 		(ch)->stats.event += (v); \
178 	} while (0)
179 
180 #define accel_update_task_stats(ch, task, event, v) \
181 	accel_update_stats(ch, operations[(task)->op_code].event, v)
182 
183 static inline void accel_sequence_task_cb(struct spdk_accel_sequence *seq,
184 		struct spdk_accel_task *task, int status);
185 
186 static inline void
187 accel_sequence_set_state(struct spdk_accel_sequence *seq, enum accel_sequence_state state)
188 {
189 	SPDK_DEBUGLOG(accel, "seq=%p, setting state: %s -> %s\n", seq,
190 		      ACCEL_SEQUENCE_STATE_STRING(seq->state), ACCEL_SEQUENCE_STATE_STRING(state));
191 	assert(seq->state != ACCEL_SEQUENCE_STATE_ERROR || state == ACCEL_SEQUENCE_STATE_ERROR);
192 	seq->state = state;
193 }
194 
195 static void
196 accel_sequence_set_fail(struct spdk_accel_sequence *seq, int status)
197 {
198 	accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
199 	assert(status != 0);
200 	seq->status = status;
201 }
202 
203 int
204 spdk_accel_get_opc_module_name(enum spdk_accel_opcode opcode, const char **module_name)
205 {
206 	if (opcode >= SPDK_ACCEL_OPC_LAST) {
207 		/* invalid opcode */
208 		return -EINVAL;
209 	}
210 
211 	if (g_modules_opc[opcode].module) {
212 		*module_name = g_modules_opc[opcode].module->name;
213 	} else {
214 		return -ENOENT;
215 	}
216 
217 	return 0;
218 }
219 
220 void
221 _accel_for_each_module(struct module_info *info, _accel_for_each_module_fn fn)
222 {
223 	struct spdk_accel_module_if *accel_module;
224 	enum spdk_accel_opcode opcode;
225 	int j = 0;
226 
227 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
228 		for (opcode = 0; opcode < SPDK_ACCEL_OPC_LAST; opcode++) {
229 			if (accel_module->supports_opcode(opcode)) {
230 				info->ops[j] = opcode;
231 				j++;
232 			}
233 		}
234 		info->name = accel_module->name;
235 		info->num_ops = j;
236 		fn(info);
237 		j = 0;
238 	}
239 }
240 
241 const char *
242 spdk_accel_get_opcode_name(enum spdk_accel_opcode opcode)
243 {
244 	if (opcode < SPDK_ACCEL_OPC_LAST) {
245 		return g_opcode_strings[opcode];
246 	}
247 
248 	return NULL;
249 }
250 
251 int
252 spdk_accel_assign_opc(enum spdk_accel_opcode opcode, const char *name)
253 {
254 	char *copy;
255 
256 	if (g_modules_started == true) {
257 		/* we don't allow re-assignment once things have started */
258 		return -EINVAL;
259 	}
260 
261 	if (opcode >= SPDK_ACCEL_OPC_LAST) {
262 		/* invalid opcode */
263 		return -EINVAL;
264 	}
265 
266 	copy = strdup(name);
267 	if (copy == NULL) {
268 		return -ENOMEM;
269 	}
270 
271 	/* module selection will be validated after the framework starts. */
272 	free(g_modules_opc_override[opcode]);
273 	g_modules_opc_override[opcode] = copy;
274 
275 	return 0;
276 }
277 
278 inline static struct spdk_accel_task *
279 _get_task(struct accel_io_channel *accel_ch, spdk_accel_completion_cb cb_fn, void *cb_arg)
280 {
281 	struct spdk_accel_task *accel_task;
282 
283 	accel_task = STAILQ_FIRST(&accel_ch->task_pool);
284 	if (spdk_unlikely(accel_task == NULL)) {
285 		accel_update_stats(accel_ch, retry.task, 1);
286 		return NULL;
287 	}
288 
289 	accel_update_stats(accel_ch, task_outstanding, 1);
290 	STAILQ_REMOVE_HEAD(&accel_ch->task_pool, link);
291 	accel_task->link.stqe_next = NULL;
292 
293 	accel_task->cb_fn = cb_fn;
294 	accel_task->cb_arg = cb_arg;
295 	accel_task->accel_ch = accel_ch;
296 	accel_task->s.iovs = NULL;
297 	accel_task->d.iovs = NULL;
298 
299 	return accel_task;
300 }
301 
302 static void
303 _put_task(struct accel_io_channel *ch, struct spdk_accel_task *task)
304 {
305 	STAILQ_INSERT_HEAD(&ch->task_pool, task, link);
306 	accel_update_stats(ch, task_outstanding, -1);
307 }
308 
309 void
310 spdk_accel_task_complete(struct spdk_accel_task *accel_task, int status)
311 {
312 	struct accel_io_channel		*accel_ch = accel_task->accel_ch;
313 	spdk_accel_completion_cb	cb_fn;
314 	void				*cb_arg;
315 
316 	accel_update_task_stats(accel_ch, accel_task, executed, 1);
317 	accel_update_task_stats(accel_ch, accel_task, num_bytes, accel_task->nbytes);
318 	if (spdk_unlikely(status != 0)) {
319 		accel_update_task_stats(accel_ch, accel_task, failed, 1);
320 	}
321 
322 	if (accel_task->seq) {
323 		accel_sequence_task_cb(accel_task->seq, accel_task, status);
324 		return;
325 	}
326 
327 	cb_fn = accel_task->cb_fn;
328 	cb_arg = accel_task->cb_arg;
329 
330 	if (accel_task->has_aux) {
331 		SLIST_INSERT_HEAD(&accel_ch->task_aux_data_pool, accel_task->aux, link);
332 		accel_task->aux = NULL;
333 		accel_task->has_aux = false;
334 	}
335 
336 	/* We should put the accel_task into the list firstly in order to avoid
337 	 * the accel task list is exhausted when there is recursive call to
338 	 * allocate accel_task in user's call back function (cb_fn)
339 	 */
340 	_put_task(accel_ch, accel_task);
341 
342 	cb_fn(cb_arg, status);
343 }
344 
345 static inline int
346 accel_submit_task(struct accel_io_channel *accel_ch, struct spdk_accel_task *task)
347 {
348 	struct spdk_io_channel *module_ch = accel_ch->module_ch[task->op_code];
349 	struct spdk_accel_module_if *module = g_modules_opc[task->op_code].module;
350 	int rc;
351 
352 	rc = module->submit_tasks(module_ch, task);
353 	if (spdk_unlikely(rc != 0)) {
354 		accel_update_task_stats(accel_ch, task, failed, 1);
355 	}
356 
357 	return rc;
358 }
359 
360 static inline uint64_t
361 accel_get_iovlen(struct iovec *iovs, uint32_t iovcnt)
362 {
363 	uint64_t result = 0;
364 	uint32_t i;
365 
366 	for (i = 0; i < iovcnt; ++i) {
367 		result += iovs[i].iov_len;
368 	}
369 
370 	return result;
371 }
372 
373 #define ACCEL_TASK_ALLOC_AUX_BUF(task)						\
374 do {										\
375         (task)->aux = SLIST_FIRST(&(task)->accel_ch->task_aux_data_pool);	\
376         if (spdk_unlikely(!(task)->aux)) {					\
377                 SPDK_ERRLOG("Fatal problem, aux data was not allocated\n");	\
378                 _put_task(task->accel_ch, task);				\
379                 assert(0);							\
380                 return -ENOMEM;							\
381         }									\
382         SLIST_REMOVE_HEAD(&(task)->accel_ch->task_aux_data_pool, link);		\
383         (task)->has_aux = true;							\
384 } while (0)
385 
386 /* Accel framework public API for copy function */
387 int
388 spdk_accel_submit_copy(struct spdk_io_channel *ch, void *dst, void *src,
389 		       uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
390 {
391 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
392 	struct spdk_accel_task *accel_task;
393 
394 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
395 	if (spdk_unlikely(accel_task == NULL)) {
396 		return -ENOMEM;
397 	}
398 
399 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
400 
401 	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
402 	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
403 	accel_task->d.iovs[0].iov_base = dst;
404 	accel_task->d.iovs[0].iov_len = nbytes;
405 	accel_task->d.iovcnt = 1;
406 	accel_task->s.iovs[0].iov_base = src;
407 	accel_task->s.iovs[0].iov_len = nbytes;
408 	accel_task->s.iovcnt = 1;
409 	accel_task->nbytes = nbytes;
410 	accel_task->op_code = SPDK_ACCEL_OPC_COPY;
411 	accel_task->src_domain = NULL;
412 	accel_task->dst_domain = NULL;
413 
414 	return accel_submit_task(accel_ch, accel_task);
415 }
416 
417 /* Accel framework public API for dual cast copy function */
418 int
419 spdk_accel_submit_dualcast(struct spdk_io_channel *ch, void *dst1,
420 			   void *dst2, void *src, uint64_t nbytes,
421 			   spdk_accel_completion_cb cb_fn, void *cb_arg)
422 {
423 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
424 	struct spdk_accel_task *accel_task;
425 
426 	if ((uintptr_t)dst1 & (ALIGN_4K - 1) || (uintptr_t)dst2 & (ALIGN_4K - 1)) {
427 		SPDK_ERRLOG("Dualcast requires 4K alignment on dst addresses\n");
428 		return -EINVAL;
429 	}
430 
431 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
432 	if (spdk_unlikely(accel_task == NULL)) {
433 		return -ENOMEM;
434 	}
435 
436 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
437 
438 	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
439 	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
440 	accel_task->d2.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST2];
441 	accel_task->d.iovs[0].iov_base = dst1;
442 	accel_task->d.iovs[0].iov_len = nbytes;
443 	accel_task->d.iovcnt = 1;
444 	accel_task->d2.iovs[0].iov_base = dst2;
445 	accel_task->d2.iovs[0].iov_len = nbytes;
446 	accel_task->d2.iovcnt = 1;
447 	accel_task->s.iovs[0].iov_base = src;
448 	accel_task->s.iovs[0].iov_len = nbytes;
449 	accel_task->s.iovcnt = 1;
450 	accel_task->nbytes = nbytes;
451 	accel_task->op_code = SPDK_ACCEL_OPC_DUALCAST;
452 	accel_task->src_domain = NULL;
453 	accel_task->dst_domain = NULL;
454 
455 	return accel_submit_task(accel_ch, accel_task);
456 }
457 
458 /* Accel framework public API for compare function */
459 
460 int
461 spdk_accel_submit_compare(struct spdk_io_channel *ch, void *src1,
462 			  void *src2, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
463 			  void *cb_arg)
464 {
465 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
466 	struct spdk_accel_task *accel_task;
467 
468 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
469 	if (spdk_unlikely(accel_task == NULL)) {
470 		return -ENOMEM;
471 	}
472 
473 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
474 
475 	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
476 	accel_task->s2.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC2];
477 	accel_task->s.iovs[0].iov_base = src1;
478 	accel_task->s.iovs[0].iov_len = nbytes;
479 	accel_task->s.iovcnt = 1;
480 	accel_task->s2.iovs[0].iov_base = src2;
481 	accel_task->s2.iovs[0].iov_len = nbytes;
482 	accel_task->s2.iovcnt = 1;
483 	accel_task->nbytes = nbytes;
484 	accel_task->op_code = SPDK_ACCEL_OPC_COMPARE;
485 	accel_task->src_domain = NULL;
486 	accel_task->dst_domain = NULL;
487 
488 	return accel_submit_task(accel_ch, accel_task);
489 }
490 
491 /* Accel framework public API for fill function */
492 int
493 spdk_accel_submit_fill(struct spdk_io_channel *ch, void *dst,
494 		       uint8_t fill, uint64_t nbytes,
495 		       spdk_accel_completion_cb cb_fn, void *cb_arg)
496 {
497 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
498 	struct spdk_accel_task *accel_task;
499 
500 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
501 	if (spdk_unlikely(accel_task == NULL)) {
502 		return -ENOMEM;
503 	}
504 
505 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
506 
507 	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
508 	accel_task->d.iovs[0].iov_base = dst;
509 	accel_task->d.iovs[0].iov_len = nbytes;
510 	accel_task->d.iovcnt = 1;
511 	accel_task->nbytes = nbytes;
512 	memset(&accel_task->fill_pattern, fill, sizeof(uint64_t));
513 	accel_task->op_code = SPDK_ACCEL_OPC_FILL;
514 	accel_task->src_domain = NULL;
515 	accel_task->dst_domain = NULL;
516 
517 	return accel_submit_task(accel_ch, accel_task);
518 }
519 
520 /* Accel framework public API for CRC-32C function */
521 int
522 spdk_accel_submit_crc32c(struct spdk_io_channel *ch, uint32_t *crc_dst,
523 			 void *src, uint32_t seed, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
524 			 void *cb_arg)
525 {
526 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
527 	struct spdk_accel_task *accel_task;
528 
529 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
530 	if (spdk_unlikely(accel_task == NULL)) {
531 		return -ENOMEM;
532 	}
533 
534 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
535 
536 	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
537 	accel_task->s.iovs[0].iov_base = src;
538 	accel_task->s.iovs[0].iov_len = nbytes;
539 	accel_task->s.iovcnt = 1;
540 	accel_task->nbytes = nbytes;
541 	accel_task->crc_dst = crc_dst;
542 	accel_task->seed = seed;
543 	accel_task->op_code = SPDK_ACCEL_OPC_CRC32C;
544 	accel_task->src_domain = NULL;
545 	accel_task->dst_domain = NULL;
546 
547 	return accel_submit_task(accel_ch, accel_task);
548 }
549 
550 /* Accel framework public API for chained CRC-32C function */
551 int
552 spdk_accel_submit_crc32cv(struct spdk_io_channel *ch, uint32_t *crc_dst,
553 			  struct iovec *iov, uint32_t iov_cnt, uint32_t seed,
554 			  spdk_accel_completion_cb cb_fn, void *cb_arg)
555 {
556 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
557 	struct spdk_accel_task *accel_task;
558 
559 	if (iov == NULL) {
560 		SPDK_ERRLOG("iov should not be NULL");
561 		return -EINVAL;
562 	}
563 
564 	if (!iov_cnt) {
565 		SPDK_ERRLOG("iovcnt should not be zero value\n");
566 		return -EINVAL;
567 	}
568 
569 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
570 	if (spdk_unlikely(accel_task == NULL)) {
571 		SPDK_ERRLOG("no memory\n");
572 		assert(0);
573 		return -ENOMEM;
574 	}
575 
576 	accel_task->s.iovs = iov;
577 	accel_task->s.iovcnt = iov_cnt;
578 	accel_task->nbytes = accel_get_iovlen(iov, iov_cnt);
579 	accel_task->crc_dst = crc_dst;
580 	accel_task->seed = seed;
581 	accel_task->op_code = SPDK_ACCEL_OPC_CRC32C;
582 	accel_task->src_domain = NULL;
583 	accel_task->dst_domain = NULL;
584 
585 	return accel_submit_task(accel_ch, accel_task);
586 }
587 
588 /* Accel framework public API for copy with CRC-32C function */
589 int
590 spdk_accel_submit_copy_crc32c(struct spdk_io_channel *ch, void *dst,
591 			      void *src, uint32_t *crc_dst, uint32_t seed, uint64_t nbytes,
592 			      spdk_accel_completion_cb cb_fn, void *cb_arg)
593 {
594 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
595 	struct spdk_accel_task *accel_task;
596 
597 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
598 	if (spdk_unlikely(accel_task == NULL)) {
599 		return -ENOMEM;
600 	}
601 
602 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
603 
604 	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
605 	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
606 	accel_task->d.iovs[0].iov_base = dst;
607 	accel_task->d.iovs[0].iov_len = nbytes;
608 	accel_task->d.iovcnt = 1;
609 	accel_task->s.iovs[0].iov_base = src;
610 	accel_task->s.iovs[0].iov_len = nbytes;
611 	accel_task->s.iovcnt = 1;
612 	accel_task->nbytes = nbytes;
613 	accel_task->crc_dst = crc_dst;
614 	accel_task->seed = seed;
615 	accel_task->op_code = SPDK_ACCEL_OPC_COPY_CRC32C;
616 	accel_task->src_domain = NULL;
617 	accel_task->dst_domain = NULL;
618 
619 	return accel_submit_task(accel_ch, accel_task);
620 }
621 
622 /* Accel framework public API for chained copy + CRC-32C function */
623 int
624 spdk_accel_submit_copy_crc32cv(struct spdk_io_channel *ch, void *dst,
625 			       struct iovec *src_iovs, uint32_t iov_cnt, uint32_t *crc_dst,
626 			       uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg)
627 {
628 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
629 	struct spdk_accel_task *accel_task;
630 	uint64_t nbytes;
631 
632 	if (src_iovs == NULL) {
633 		SPDK_ERRLOG("iov should not be NULL");
634 		return -EINVAL;
635 	}
636 
637 	if (!iov_cnt) {
638 		SPDK_ERRLOG("iovcnt should not be zero value\n");
639 		return -EINVAL;
640 	}
641 
642 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
643 	if (spdk_unlikely(accel_task == NULL)) {
644 		SPDK_ERRLOG("no memory\n");
645 		assert(0);
646 		return -ENOMEM;
647 	}
648 
649 	nbytes = accel_get_iovlen(src_iovs, iov_cnt);
650 
651 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
652 
653 	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
654 	accel_task->d.iovs[0].iov_base = dst;
655 	accel_task->d.iovs[0].iov_len = nbytes;
656 	accel_task->d.iovcnt = 1;
657 	accel_task->s.iovs = src_iovs;
658 	accel_task->s.iovcnt = iov_cnt;
659 	accel_task->nbytes = nbytes;
660 	accel_task->crc_dst = crc_dst;
661 	accel_task->seed = seed;
662 	accel_task->op_code = SPDK_ACCEL_OPC_COPY_CRC32C;
663 	accel_task->src_domain = NULL;
664 	accel_task->dst_domain = NULL;
665 
666 	return accel_submit_task(accel_ch, accel_task);
667 }
668 
669 int
670 spdk_accel_get_compress_level_range(enum spdk_accel_comp_algo comp_algo,
671 				    uint32_t *min_level, uint32_t *max_level)
672 {
673 	struct spdk_accel_module_if *module = g_modules_opc[SPDK_ACCEL_OPC_COMPRESS].module;
674 
675 	if (module->get_compress_level_range == NULL) {
676 		SPDK_ERRLOG("Module %s doesn't implement callback fn get_compress_level_range.\n", module->name);
677 		return -ENOTSUP;
678 	}
679 
680 	return module->get_compress_level_range(comp_algo, min_level, max_level);
681 }
682 
683 static int
684 _accel_check_comp_algo(enum spdk_accel_comp_algo comp_algo)
685 {
686 	struct spdk_accel_module_if *module = g_modules_opc[SPDK_ACCEL_OPC_COMPRESS].module;
687 
688 	if (!module->compress_supports_algo || !module->compress_supports_algo(comp_algo)) {
689 		SPDK_ERRLOG("Module %s doesn't support compression algo %d\n", module->name, comp_algo);
690 		return -ENOTSUP;
691 	}
692 
693 	return 0;
694 }
695 
696 int
697 spdk_accel_submit_compress_ext(struct spdk_io_channel *ch, void *dst, uint64_t nbytes,
698 			       struct iovec *src_iovs, size_t src_iovcnt,
699 			       enum spdk_accel_comp_algo comp_algo, uint32_t comp_level,
700 			       uint32_t *output_size, spdk_accel_completion_cb cb_fn, void *cb_arg)
701 {
702 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
703 	struct spdk_accel_task *accel_task;
704 	int rc;
705 
706 	rc = _accel_check_comp_algo(comp_algo);
707 	if (spdk_unlikely(rc != 0)) {
708 		return rc;
709 	}
710 
711 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
712 	if (spdk_unlikely(accel_task == NULL)) {
713 		return -ENOMEM;
714 	}
715 
716 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
717 
718 	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
719 	accel_task->d.iovs[0].iov_base = dst;
720 	accel_task->d.iovs[0].iov_len = nbytes;
721 	accel_task->d.iovcnt = 1;
722 	accel_task->output_size = output_size;
723 	accel_task->s.iovs = src_iovs;
724 	accel_task->s.iovcnt = src_iovcnt;
725 	accel_task->nbytes = nbytes;
726 	accel_task->op_code = SPDK_ACCEL_OPC_COMPRESS;
727 	accel_task->src_domain = NULL;
728 	accel_task->dst_domain = NULL;
729 	accel_task->comp.algo = comp_algo;
730 	accel_task->comp.level = comp_level;
731 
732 	return accel_submit_task(accel_ch, accel_task);
733 }
734 
735 int
736 spdk_accel_submit_decompress_ext(struct spdk_io_channel *ch, struct iovec *dst_iovs,
737 				 size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
738 				 enum spdk_accel_comp_algo decomp_algo, uint32_t *output_size,
739 				 spdk_accel_completion_cb cb_fn, void *cb_arg)
740 {
741 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
742 	struct spdk_accel_task *accel_task;
743 	int rc;
744 
745 	rc = _accel_check_comp_algo(decomp_algo);
746 	if (spdk_unlikely(rc != 0)) {
747 		return rc;
748 	}
749 
750 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
751 	if (spdk_unlikely(accel_task == NULL)) {
752 		return -ENOMEM;
753 	}
754 
755 	accel_task->output_size = output_size;
756 	accel_task->s.iovs = src_iovs;
757 	accel_task->s.iovcnt = src_iovcnt;
758 	accel_task->d.iovs = dst_iovs;
759 	accel_task->d.iovcnt = dst_iovcnt;
760 	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
761 	accel_task->op_code = SPDK_ACCEL_OPC_DECOMPRESS;
762 	accel_task->src_domain = NULL;
763 	accel_task->dst_domain = NULL;
764 	accel_task->comp.algo = decomp_algo;
765 
766 	return accel_submit_task(accel_ch, accel_task);
767 }
768 
769 int
770 spdk_accel_submit_compress(struct spdk_io_channel *ch, void *dst, uint64_t nbytes,
771 			   struct iovec *src_iovs, size_t src_iovcnt, uint32_t *output_size,
772 			   spdk_accel_completion_cb cb_fn, void *cb_arg)
773 {
774 	return spdk_accel_submit_compress_ext(ch, dst, nbytes, src_iovs, src_iovcnt,
775 					      SPDK_ACCEL_COMP_ALGO_DEFLATE, 1, output_size, cb_fn, cb_arg);
776 }
777 
778 int
779 spdk_accel_submit_decompress(struct spdk_io_channel *ch, struct iovec *dst_iovs,
780 			     size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
781 			     uint32_t *output_size, spdk_accel_completion_cb cb_fn,
782 			     void *cb_arg)
783 {
784 	return spdk_accel_submit_decompress_ext(ch, dst_iovs, dst_iovcnt, src_iovs, src_iovcnt,
785 						SPDK_ACCEL_COMP_ALGO_DEFLATE, output_size, cb_fn, cb_arg);
786 }
787 
788 int
789 spdk_accel_submit_encrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
790 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
791 			  struct iovec *src_iovs, uint32_t src_iovcnt,
792 			  uint64_t iv, uint32_t block_size,
793 			  spdk_accel_completion_cb cb_fn, void *cb_arg)
794 {
795 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
796 	struct spdk_accel_task *accel_task;
797 
798 	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
799 		return -EINVAL;
800 	}
801 
802 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
803 	if (spdk_unlikely(accel_task == NULL)) {
804 		return -ENOMEM;
805 	}
806 
807 	accel_task->crypto_key = key;
808 	accel_task->s.iovs = src_iovs;
809 	accel_task->s.iovcnt = src_iovcnt;
810 	accel_task->d.iovs = dst_iovs;
811 	accel_task->d.iovcnt = dst_iovcnt;
812 	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
813 	accel_task->iv = iv;
814 	accel_task->block_size = block_size;
815 	accel_task->op_code = SPDK_ACCEL_OPC_ENCRYPT;
816 	accel_task->src_domain = NULL;
817 	accel_task->dst_domain = NULL;
818 
819 	return accel_submit_task(accel_ch, accel_task);
820 }
821 
822 int
823 spdk_accel_submit_decrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
824 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
825 			  struct iovec *src_iovs, uint32_t src_iovcnt,
826 			  uint64_t iv, uint32_t block_size,
827 			  spdk_accel_completion_cb cb_fn, void *cb_arg)
828 {
829 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
830 	struct spdk_accel_task *accel_task;
831 
832 	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
833 		return -EINVAL;
834 	}
835 
836 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
837 	if (spdk_unlikely(accel_task == NULL)) {
838 		return -ENOMEM;
839 	}
840 
841 	accel_task->crypto_key = key;
842 	accel_task->s.iovs = src_iovs;
843 	accel_task->s.iovcnt = src_iovcnt;
844 	accel_task->d.iovs = dst_iovs;
845 	accel_task->d.iovcnt = dst_iovcnt;
846 	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
847 	accel_task->iv = iv;
848 	accel_task->block_size = block_size;
849 	accel_task->op_code = SPDK_ACCEL_OPC_DECRYPT;
850 	accel_task->src_domain = NULL;
851 	accel_task->dst_domain = NULL;
852 
853 	return accel_submit_task(accel_ch, accel_task);
854 }
855 
856 int
857 spdk_accel_submit_xor(struct spdk_io_channel *ch, void *dst, void **sources, uint32_t nsrcs,
858 		      uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
859 {
860 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
861 	struct spdk_accel_task *accel_task;
862 
863 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
864 	if (spdk_unlikely(accel_task == NULL)) {
865 		return -ENOMEM;
866 	}
867 
868 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
869 
870 	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
871 	accel_task->nsrcs.srcs = sources;
872 	accel_task->nsrcs.cnt = nsrcs;
873 	accel_task->d.iovs[0].iov_base = dst;
874 	accel_task->d.iovs[0].iov_len = nbytes;
875 	accel_task->d.iovcnt = 1;
876 	accel_task->nbytes = nbytes;
877 	accel_task->op_code = SPDK_ACCEL_OPC_XOR;
878 	accel_task->src_domain = NULL;
879 	accel_task->dst_domain = NULL;
880 
881 	return accel_submit_task(accel_ch, accel_task);
882 }
883 
884 int
885 spdk_accel_submit_dif_verify(struct spdk_io_channel *ch,
886 			     struct iovec *iovs, size_t iovcnt, uint32_t num_blocks,
887 			     const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err,
888 			     spdk_accel_completion_cb cb_fn, void *cb_arg)
889 {
890 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
891 	struct spdk_accel_task *accel_task;
892 
893 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
894 	if (accel_task == NULL) {
895 		return -ENOMEM;
896 	}
897 
898 	accel_task->s.iovs = iovs;
899 	accel_task->s.iovcnt = iovcnt;
900 	accel_task->dif.ctx = ctx;
901 	accel_task->dif.err = err;
902 	accel_task->dif.num_blocks = num_blocks;
903 	accel_task->nbytes = num_blocks * ctx->block_size;
904 	accel_task->op_code = SPDK_ACCEL_OPC_DIF_VERIFY;
905 	accel_task->src_domain = NULL;
906 	accel_task->dst_domain = NULL;
907 
908 	return accel_submit_task(accel_ch, accel_task);
909 }
910 
911 int
912 spdk_accel_submit_dif_generate(struct spdk_io_channel *ch,
913 			       struct iovec *iovs, size_t iovcnt, uint32_t num_blocks,
914 			       const struct spdk_dif_ctx *ctx,
915 			       spdk_accel_completion_cb cb_fn, void *cb_arg)
916 {
917 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
918 	struct spdk_accel_task *accel_task;
919 
920 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
921 	if (accel_task == NULL) {
922 		return -ENOMEM;
923 	}
924 
925 	accel_task->s.iovs = iovs;
926 	accel_task->s.iovcnt = iovcnt;
927 	accel_task->dif.ctx = ctx;
928 	accel_task->dif.num_blocks = num_blocks;
929 	accel_task->nbytes = num_blocks * ctx->block_size;
930 	accel_task->op_code = SPDK_ACCEL_OPC_DIF_GENERATE;
931 	accel_task->src_domain = NULL;
932 	accel_task->dst_domain = NULL;
933 
934 	return accel_submit_task(accel_ch, accel_task);
935 }
936 
937 int
938 spdk_accel_submit_dif_generate_copy(struct spdk_io_channel *ch, struct iovec *dst_iovs,
939 				    size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
940 				    uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
941 				    spdk_accel_completion_cb cb_fn, void *cb_arg)
942 {
943 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
944 	struct spdk_accel_task *accel_task;
945 
946 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
947 	if (accel_task == NULL) {
948 		return -ENOMEM;
949 	}
950 
951 	accel_task->s.iovs = src_iovs;
952 	accel_task->s.iovcnt = src_iovcnt;
953 	accel_task->d.iovs = dst_iovs;
954 	accel_task->d.iovcnt = dst_iovcnt;
955 	accel_task->dif.ctx = ctx;
956 	accel_task->dif.num_blocks = num_blocks;
957 	accel_task->nbytes = num_blocks * ctx->block_size;
958 	accel_task->op_code = SPDK_ACCEL_OPC_DIF_GENERATE_COPY;
959 	accel_task->src_domain = NULL;
960 	accel_task->dst_domain = NULL;
961 
962 	return accel_submit_task(accel_ch, accel_task);
963 }
964 
965 int
966 spdk_accel_submit_dif_verify_copy(struct spdk_io_channel *ch,
967 				  struct iovec *dst_iovs, size_t dst_iovcnt,
968 				  struct iovec *src_iovs, size_t src_iovcnt, uint32_t num_blocks,
969 				  const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err,
970 				  spdk_accel_completion_cb cb_fn, void *cb_arg)
971 {
972 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
973 	struct spdk_accel_task *accel_task;
974 
975 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
976 	if (accel_task == NULL) {
977 		return -ENOMEM;
978 	}
979 
980 	accel_task->s.iovs = src_iovs;
981 	accel_task->s.iovcnt = src_iovcnt;
982 	accel_task->d.iovs = dst_iovs;
983 	accel_task->d.iovcnt = dst_iovcnt;
984 	accel_task->dif.ctx = ctx;
985 	accel_task->dif.err = err;
986 	accel_task->dif.num_blocks = num_blocks;
987 	accel_task->nbytes = num_blocks * ctx->block_size;
988 	accel_task->op_code = SPDK_ACCEL_OPC_DIF_VERIFY_COPY;
989 	accel_task->src_domain = NULL;
990 	accel_task->dst_domain = NULL;
991 
992 	return accel_submit_task(accel_ch, accel_task);
993 }
994 
995 int
996 spdk_accel_submit_dix_generate(struct spdk_io_channel *ch, struct iovec *iovs,
997 			       size_t iovcnt, struct iovec *md_iov, uint32_t num_blocks,
998 			       const struct spdk_dif_ctx *ctx, spdk_accel_completion_cb cb_fn,
999 			       void *cb_arg)
1000 {
1001 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1002 	struct spdk_accel_task *accel_task;
1003 
1004 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
1005 	if (accel_task == NULL) {
1006 		return -ENOMEM;
1007 	}
1008 
1009 	accel_task->s.iovs = iovs;
1010 	accel_task->s.iovcnt = iovcnt;
1011 	accel_task->d.iovs = md_iov;
1012 	accel_task->d.iovcnt = 1;
1013 	accel_task->dif.ctx = ctx;
1014 	accel_task->dif.num_blocks = num_blocks;
1015 	accel_task->nbytes = num_blocks * ctx->block_size;
1016 	accel_task->op_code = SPDK_ACCEL_OPC_DIX_GENERATE;
1017 	accel_task->src_domain = NULL;
1018 	accel_task->dst_domain = NULL;
1019 
1020 	return accel_submit_task(accel_ch, accel_task);
1021 }
1022 
1023 int
1024 spdk_accel_submit_dix_verify(struct spdk_io_channel *ch, struct iovec *iovs,
1025 			     size_t iovcnt, struct iovec *md_iov, uint32_t num_blocks,
1026 			     const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err,
1027 			     spdk_accel_completion_cb cb_fn, void *cb_arg)
1028 {
1029 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1030 	struct spdk_accel_task *accel_task;
1031 
1032 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
1033 	if (accel_task == NULL) {
1034 		return -ENOMEM;
1035 	}
1036 
1037 	accel_task->s.iovs = iovs;
1038 	accel_task->s.iovcnt = iovcnt;
1039 	accel_task->d.iovs = md_iov;
1040 	accel_task->d.iovcnt = 1;
1041 	accel_task->dif.ctx = ctx;
1042 	accel_task->dif.err = err;
1043 	accel_task->dif.num_blocks = num_blocks;
1044 	accel_task->nbytes = num_blocks * ctx->block_size;
1045 	accel_task->op_code = SPDK_ACCEL_OPC_DIX_VERIFY;
1046 	accel_task->src_domain = NULL;
1047 	accel_task->dst_domain = NULL;
1048 
1049 	return accel_submit_task(accel_ch, accel_task);
1050 }
1051 
1052 static inline struct accel_buffer *
1053 accel_get_buf(struct accel_io_channel *ch, uint64_t len)
1054 {
1055 	struct accel_buffer *buf;
1056 
1057 	buf = SLIST_FIRST(&ch->buf_pool);
1058 	if (spdk_unlikely(buf == NULL)) {
1059 		accel_update_stats(ch, retry.bufdesc, 1);
1060 		return NULL;
1061 	}
1062 
1063 	SLIST_REMOVE_HEAD(&ch->buf_pool, link);
1064 	buf->len = len;
1065 	buf->buf = NULL;
1066 	buf->seq = NULL;
1067 	buf->cb_fn = NULL;
1068 
1069 	return buf;
1070 }
1071 
1072 static inline void
1073 accel_put_buf(struct accel_io_channel *ch, struct accel_buffer *buf)
1074 {
1075 	if (buf->buf != NULL) {
1076 		spdk_iobuf_put(&ch->iobuf, buf->buf, buf->len);
1077 	}
1078 
1079 	SLIST_INSERT_HEAD(&ch->buf_pool, buf, link);
1080 }
1081 
1082 static inline struct spdk_accel_sequence *
1083 accel_sequence_get(struct accel_io_channel *ch)
1084 {
1085 	struct spdk_accel_sequence *seq;
1086 
1087 	assert(g_opts.task_count >= ch->stats.task_outstanding);
1088 
1089 	/* Sequence cannot be allocated if number of available task objects cannot satisfy required limit.
1090 	 * This is to prevent potential dead lock when few requests are pending task resource and none can
1091 	 * advance the processing. This solution should work only if there is single async operation after
1092 	 * sequence obj obtained, so assume that is possible to happen with io buffer allocation now, if
1093 	 * there are more async operations then solution should be improved. */
1094 	if (spdk_unlikely(g_opts.task_count - ch->stats.task_outstanding < ACCEL_TASKS_IN_SEQUENCE_LIMIT)) {
1095 		return NULL;
1096 	}
1097 
1098 	seq = SLIST_FIRST(&ch->seq_pool);
1099 	if (spdk_unlikely(seq == NULL)) {
1100 		accel_update_stats(ch, retry.sequence, 1);
1101 		return NULL;
1102 	}
1103 
1104 	accel_update_stats(ch, sequence_outstanding, 1);
1105 	SLIST_REMOVE_HEAD(&ch->seq_pool, link);
1106 
1107 	TAILQ_INIT(&seq->tasks);
1108 	SLIST_INIT(&seq->bounce_bufs);
1109 
1110 	seq->ch = ch;
1111 	seq->status = 0;
1112 	seq->state = ACCEL_SEQUENCE_STATE_INIT;
1113 	seq->in_process_sequence = false;
1114 
1115 	return seq;
1116 }
1117 
1118 static inline void
1119 accel_sequence_put(struct spdk_accel_sequence *seq)
1120 {
1121 	struct accel_io_channel *ch = seq->ch;
1122 	struct accel_buffer *buf;
1123 
1124 	while (!SLIST_EMPTY(&seq->bounce_bufs)) {
1125 		buf = SLIST_FIRST(&seq->bounce_bufs);
1126 		SLIST_REMOVE_HEAD(&seq->bounce_bufs, link);
1127 		accel_put_buf(seq->ch, buf);
1128 	}
1129 
1130 	assert(TAILQ_EMPTY(&seq->tasks));
1131 	seq->ch = NULL;
1132 
1133 	SLIST_INSERT_HEAD(&ch->seq_pool, seq, link);
1134 	accel_update_stats(ch, sequence_outstanding, -1);
1135 }
1136 
1137 static inline struct spdk_accel_task *
1138 accel_sequence_get_task(struct accel_io_channel *ch, struct spdk_accel_sequence *seq,
1139 			spdk_accel_step_cb cb_fn, void *cb_arg)
1140 {
1141 	struct spdk_accel_task *task;
1142 
1143 	task = _get_task(ch, NULL, NULL);
1144 	if (spdk_unlikely(task == NULL)) {
1145 		return task;
1146 	}
1147 
1148 	task->step_cb_fn = cb_fn;
1149 	task->cb_arg = cb_arg;
1150 	task->seq = seq;
1151 
1152 	return task;
1153 }
1154 
1155 int
1156 spdk_accel_append_copy(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1157 		       struct iovec *dst_iovs, uint32_t dst_iovcnt,
1158 		       struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1159 		       struct iovec *src_iovs, uint32_t src_iovcnt,
1160 		       struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1161 		       spdk_accel_step_cb cb_fn, void *cb_arg)
1162 {
1163 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1164 	struct spdk_accel_task *task;
1165 	struct spdk_accel_sequence *seq = *pseq;
1166 
1167 	if (seq == NULL) {
1168 		seq = accel_sequence_get(accel_ch);
1169 		if (spdk_unlikely(seq == NULL)) {
1170 			return -ENOMEM;
1171 		}
1172 	}
1173 
1174 	assert(seq->ch == accel_ch);
1175 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1176 	if (spdk_unlikely(task == NULL)) {
1177 		if (*pseq == NULL) {
1178 			accel_sequence_put(seq);
1179 		}
1180 
1181 		return -ENOMEM;
1182 	}
1183 
1184 	task->dst_domain = dst_domain;
1185 	task->dst_domain_ctx = dst_domain_ctx;
1186 	task->d.iovs = dst_iovs;
1187 	task->d.iovcnt = dst_iovcnt;
1188 	task->src_domain = src_domain;
1189 	task->src_domain_ctx = src_domain_ctx;
1190 	task->s.iovs = src_iovs;
1191 	task->s.iovcnt = src_iovcnt;
1192 	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
1193 	task->op_code = SPDK_ACCEL_OPC_COPY;
1194 
1195 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1196 	*pseq = seq;
1197 
1198 	return 0;
1199 }
1200 
1201 int
1202 spdk_accel_append_fill(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1203 		       void *buf, uint64_t len,
1204 		       struct spdk_memory_domain *domain, void *domain_ctx, uint8_t pattern,
1205 		       spdk_accel_step_cb cb_fn, void *cb_arg)
1206 {
1207 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1208 	struct spdk_accel_task *task;
1209 	struct spdk_accel_sequence *seq = *pseq;
1210 
1211 	if (seq == NULL) {
1212 		seq = accel_sequence_get(accel_ch);
1213 		if (spdk_unlikely(seq == NULL)) {
1214 			return -ENOMEM;
1215 		}
1216 	}
1217 
1218 	assert(seq->ch == accel_ch);
1219 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1220 	if (spdk_unlikely(task == NULL)) {
1221 		if (*pseq == NULL) {
1222 			accel_sequence_put(seq);
1223 		}
1224 
1225 		return -ENOMEM;
1226 	}
1227 
1228 	memset(&task->fill_pattern, pattern, sizeof(uint64_t));
1229 
1230 	task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
1231 	if (spdk_unlikely(!task->aux)) {
1232 		SPDK_ERRLOG("Fatal problem, aux data was not allocated\n");
1233 		if (*pseq == NULL) {
1234 			accel_sequence_put((seq));
1235 		}
1236 
1237 		task->seq = NULL;
1238 		_put_task(task->accel_ch, task);
1239 		assert(0);
1240 		return -ENOMEM;
1241 	}
1242 	SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
1243 	task->has_aux = true;
1244 
1245 	task->d.iovs = &task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
1246 	task->d.iovs[0].iov_base = buf;
1247 	task->d.iovs[0].iov_len = len;
1248 	task->d.iovcnt = 1;
1249 	task->nbytes = len;
1250 	task->src_domain = NULL;
1251 	task->dst_domain = domain;
1252 	task->dst_domain_ctx = domain_ctx;
1253 	task->op_code = SPDK_ACCEL_OPC_FILL;
1254 
1255 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1256 	*pseq = seq;
1257 
1258 	return 0;
1259 }
1260 
1261 int
1262 spdk_accel_append_decompress(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1263 			     struct iovec *dst_iovs, size_t dst_iovcnt,
1264 			     struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1265 			     struct iovec *src_iovs, size_t src_iovcnt,
1266 			     struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1267 			     spdk_accel_step_cb cb_fn, void *cb_arg)
1268 {
1269 	return spdk_accel_append_decompress_ext(pseq, ch, dst_iovs, dst_iovcnt, dst_domain,
1270 						dst_domain_ctx, src_iovs, src_iovcnt, src_domain,
1271 						src_domain_ctx, SPDK_ACCEL_COMP_ALGO_DEFLATE,
1272 						cb_fn, cb_arg);
1273 }
1274 
1275 int
1276 spdk_accel_append_decompress_ext(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1277 				 struct iovec *dst_iovs, size_t dst_iovcnt,
1278 				 struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1279 				 struct iovec *src_iovs, size_t src_iovcnt,
1280 				 struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1281 				 enum spdk_accel_comp_algo decomp_algo,
1282 				 spdk_accel_step_cb cb_fn, void *cb_arg)
1283 {
1284 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1285 	struct spdk_accel_task *task;
1286 	struct spdk_accel_sequence *seq = *pseq;
1287 	int rc;
1288 
1289 	rc = _accel_check_comp_algo(decomp_algo);
1290 	if (spdk_unlikely(rc != 0)) {
1291 		return rc;
1292 	}
1293 
1294 	if (seq == NULL) {
1295 		seq = accel_sequence_get(accel_ch);
1296 		if (spdk_unlikely(seq == NULL)) {
1297 			return -ENOMEM;
1298 		}
1299 	}
1300 
1301 	assert(seq->ch == accel_ch);
1302 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1303 	if (spdk_unlikely(task == NULL)) {
1304 		if (*pseq == NULL) {
1305 			accel_sequence_put(seq);
1306 		}
1307 
1308 		return -ENOMEM;
1309 	}
1310 
1311 	/* TODO: support output_size for chaining */
1312 	task->output_size = NULL;
1313 	task->dst_domain = dst_domain;
1314 	task->dst_domain_ctx = dst_domain_ctx;
1315 	task->d.iovs = dst_iovs;
1316 	task->d.iovcnt = dst_iovcnt;
1317 	task->src_domain = src_domain;
1318 	task->src_domain_ctx = src_domain_ctx;
1319 	task->s.iovs = src_iovs;
1320 	task->s.iovcnt = src_iovcnt;
1321 	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
1322 	task->op_code = SPDK_ACCEL_OPC_DECOMPRESS;
1323 	task->comp.algo = decomp_algo;
1324 
1325 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1326 	*pseq = seq;
1327 
1328 	return 0;
1329 }
1330 
1331 int
1332 spdk_accel_append_encrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1333 			  struct spdk_accel_crypto_key *key,
1334 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
1335 			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1336 			  struct iovec *src_iovs, uint32_t src_iovcnt,
1337 			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1338 			  uint64_t iv, uint32_t block_size,
1339 			  spdk_accel_step_cb cb_fn, void *cb_arg)
1340 {
1341 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1342 	struct spdk_accel_task *task;
1343 	struct spdk_accel_sequence *seq = *pseq;
1344 
1345 	assert(dst_iovs && dst_iovcnt && src_iovs && src_iovcnt && key && block_size);
1346 
1347 	if (seq == NULL) {
1348 		seq = accel_sequence_get(accel_ch);
1349 		if (spdk_unlikely(seq == NULL)) {
1350 			return -ENOMEM;
1351 		}
1352 	}
1353 
1354 	assert(seq->ch == accel_ch);
1355 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1356 	if (spdk_unlikely(task == NULL)) {
1357 		if (*pseq == NULL) {
1358 			accel_sequence_put(seq);
1359 		}
1360 
1361 		return -ENOMEM;
1362 	}
1363 
1364 	task->crypto_key = key;
1365 	task->src_domain = src_domain;
1366 	task->src_domain_ctx = src_domain_ctx;
1367 	task->s.iovs = src_iovs;
1368 	task->s.iovcnt = src_iovcnt;
1369 	task->dst_domain = dst_domain;
1370 	task->dst_domain_ctx = dst_domain_ctx;
1371 	task->d.iovs = dst_iovs;
1372 	task->d.iovcnt = dst_iovcnt;
1373 	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
1374 	task->iv = iv;
1375 	task->block_size = block_size;
1376 	task->op_code = SPDK_ACCEL_OPC_ENCRYPT;
1377 
1378 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1379 	*pseq = seq;
1380 
1381 	return 0;
1382 }
1383 
1384 int
1385 spdk_accel_append_decrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1386 			  struct spdk_accel_crypto_key *key,
1387 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
1388 			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1389 			  struct iovec *src_iovs, uint32_t src_iovcnt,
1390 			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1391 			  uint64_t iv, uint32_t block_size,
1392 			  spdk_accel_step_cb cb_fn, void *cb_arg)
1393 {
1394 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1395 	struct spdk_accel_task *task;
1396 	struct spdk_accel_sequence *seq = *pseq;
1397 
1398 	assert(dst_iovs && dst_iovcnt && src_iovs && src_iovcnt && key && block_size);
1399 
1400 	if (seq == NULL) {
1401 		seq = accel_sequence_get(accel_ch);
1402 		if (spdk_unlikely(seq == NULL)) {
1403 			return -ENOMEM;
1404 		}
1405 	}
1406 
1407 	assert(seq->ch == accel_ch);
1408 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1409 	if (spdk_unlikely(task == NULL)) {
1410 		if (*pseq == NULL) {
1411 			accel_sequence_put(seq);
1412 		}
1413 
1414 		return -ENOMEM;
1415 	}
1416 
1417 	task->crypto_key = key;
1418 	task->src_domain = src_domain;
1419 	task->src_domain_ctx = src_domain_ctx;
1420 	task->s.iovs = src_iovs;
1421 	task->s.iovcnt = src_iovcnt;
1422 	task->dst_domain = dst_domain;
1423 	task->dst_domain_ctx = dst_domain_ctx;
1424 	task->d.iovs = dst_iovs;
1425 	task->d.iovcnt = dst_iovcnt;
1426 	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
1427 	task->iv = iv;
1428 	task->block_size = block_size;
1429 	task->op_code = SPDK_ACCEL_OPC_DECRYPT;
1430 
1431 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1432 	*pseq = seq;
1433 
1434 	return 0;
1435 }
1436 
1437 int
1438 spdk_accel_append_crc32c(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1439 			 uint32_t *dst, struct iovec *iovs, uint32_t iovcnt,
1440 			 struct spdk_memory_domain *domain, void *domain_ctx,
1441 			 uint32_t seed, spdk_accel_step_cb cb_fn, void *cb_arg)
1442 {
1443 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1444 	struct spdk_accel_task *task;
1445 	struct spdk_accel_sequence *seq = *pseq;
1446 
1447 	if (seq == NULL) {
1448 		seq = accel_sequence_get(accel_ch);
1449 		if (spdk_unlikely(seq == NULL)) {
1450 			return -ENOMEM;
1451 		}
1452 	}
1453 
1454 	assert(seq->ch == accel_ch);
1455 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1456 	if (spdk_unlikely(task == NULL)) {
1457 		if (*pseq == NULL) {
1458 			accel_sequence_put(seq);
1459 		}
1460 
1461 		return -ENOMEM;
1462 	}
1463 
1464 	task->s.iovs = iovs;
1465 	task->s.iovcnt = iovcnt;
1466 	task->src_domain = domain;
1467 	task->src_domain_ctx = domain_ctx;
1468 	task->nbytes = accel_get_iovlen(iovs, iovcnt);
1469 	task->crc_dst = dst;
1470 	task->seed = seed;
1471 	task->op_code = SPDK_ACCEL_OPC_CRC32C;
1472 	task->dst_domain = NULL;
1473 
1474 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1475 	*pseq = seq;
1476 
1477 	return 0;
1478 }
1479 
1480 int
1481 spdk_accel_append_dif_verify(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1482 			     struct iovec *iovs, size_t iovcnt,
1483 			     struct spdk_memory_domain *domain, void *domain_ctx,
1484 			     uint32_t num_blocks,
1485 			     const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err,
1486 			     spdk_accel_step_cb cb_fn, void *cb_arg)
1487 {
1488 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1489 	struct spdk_accel_task *task;
1490 	struct spdk_accel_sequence *seq = *pseq;
1491 
1492 	if (seq == NULL) {
1493 		seq = accel_sequence_get(accel_ch);
1494 		if (spdk_unlikely(seq == NULL)) {
1495 			return -ENOMEM;
1496 		}
1497 	}
1498 
1499 	assert(seq->ch == accel_ch);
1500 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1501 	if (spdk_unlikely(task == NULL)) {
1502 		if (*pseq == NULL) {
1503 			accel_sequence_put(seq);
1504 		}
1505 
1506 		return -ENOMEM;
1507 	}
1508 
1509 	task->s.iovs = iovs;
1510 	task->s.iovcnt = iovcnt;
1511 	task->src_domain = domain;
1512 	task->src_domain_ctx = domain_ctx;
1513 	task->dst_domain = NULL;
1514 	task->dif.ctx = ctx;
1515 	task->dif.err = err;
1516 	task->dif.num_blocks = num_blocks;
1517 	task->nbytes = num_blocks * ctx->block_size;
1518 	task->op_code = SPDK_ACCEL_OPC_DIF_VERIFY;
1519 
1520 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1521 	*pseq = seq;
1522 
1523 	return 0;
1524 }
1525 
1526 int
1527 spdk_accel_append_dif_verify_copy(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1528 				  struct iovec *dst_iovs, size_t dst_iovcnt,
1529 				  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1530 				  struct iovec *src_iovs, size_t src_iovcnt,
1531 				  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1532 				  uint32_t num_blocks,
1533 				  const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err,
1534 				  spdk_accel_step_cb cb_fn, void *cb_arg)
1535 {
1536 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1537 	struct spdk_accel_task *task;
1538 	struct spdk_accel_sequence *seq = *pseq;
1539 
1540 	if (seq == NULL) {
1541 		seq = accel_sequence_get(accel_ch);
1542 		if (spdk_unlikely(seq == NULL)) {
1543 			return -ENOMEM;
1544 		}
1545 	}
1546 
1547 	assert(seq->ch == accel_ch);
1548 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1549 	if (spdk_unlikely(task == NULL)) {
1550 		if (*pseq == NULL) {
1551 			accel_sequence_put(seq);
1552 		}
1553 
1554 		return -ENOMEM;
1555 	}
1556 
1557 	task->dst_domain = dst_domain;
1558 	task->dst_domain_ctx = dst_domain_ctx;
1559 	task->d.iovs = dst_iovs;
1560 	task->d.iovcnt = dst_iovcnt;
1561 	task->src_domain = src_domain;
1562 	task->src_domain_ctx = src_domain_ctx;
1563 	task->s.iovs = src_iovs;
1564 	task->s.iovcnt = src_iovcnt;
1565 	task->dif.ctx = ctx;
1566 	task->dif.err = err;
1567 	task->dif.num_blocks = num_blocks;
1568 	task->nbytes = num_blocks * ctx->block_size;
1569 	task->op_code = SPDK_ACCEL_OPC_DIF_VERIFY_COPY;
1570 
1571 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1572 	*pseq = seq;
1573 
1574 	return 0;
1575 }
1576 
1577 int
1578 spdk_accel_append_dif_generate(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1579 			       struct iovec *iovs, size_t iovcnt,
1580 			       struct spdk_memory_domain *domain, void *domain_ctx,
1581 			       uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
1582 			       spdk_accel_step_cb cb_fn, void *cb_arg)
1583 {
1584 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1585 	struct spdk_accel_task *task;
1586 	struct spdk_accel_sequence *seq = *pseq;
1587 
1588 	if (seq == NULL) {
1589 		seq = accel_sequence_get(accel_ch);
1590 		if (spdk_unlikely(seq == NULL)) {
1591 			return -ENOMEM;
1592 		}
1593 	}
1594 
1595 	assert(seq->ch == accel_ch);
1596 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1597 	if (spdk_unlikely(task == NULL)) {
1598 		if (*pseq == NULL) {
1599 			accel_sequence_put(seq);
1600 		}
1601 
1602 		return -ENOMEM;
1603 	}
1604 
1605 	task->s.iovs = iovs;
1606 	task->s.iovcnt = iovcnt;
1607 	task->src_domain = domain;
1608 	task->src_domain_ctx = domain_ctx;
1609 	task->dst_domain = NULL;
1610 	task->dif.ctx = ctx;
1611 	task->dif.num_blocks = num_blocks;
1612 	task->nbytes = num_blocks * ctx->block_size;
1613 	task->op_code = SPDK_ACCEL_OPC_DIF_GENERATE;
1614 
1615 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1616 	*pseq = seq;
1617 
1618 	return 0;
1619 }
1620 
1621 int
1622 spdk_accel_append_dif_generate_copy(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1623 				    struct iovec *dst_iovs, size_t dst_iovcnt,
1624 				    struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1625 				    struct iovec *src_iovs, size_t src_iovcnt,
1626 				    struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1627 				    uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
1628 				    spdk_accel_step_cb cb_fn, void *cb_arg)
1629 {
1630 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1631 	struct spdk_accel_task *task;
1632 	struct spdk_accel_sequence *seq = *pseq;
1633 
1634 	if (seq == NULL) {
1635 		seq = accel_sequence_get(accel_ch);
1636 		if (spdk_unlikely(seq == NULL)) {
1637 			return -ENOMEM;
1638 		}
1639 	}
1640 
1641 	assert(seq->ch == accel_ch);
1642 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1643 	if (spdk_unlikely(task == NULL)) {
1644 		if (*pseq == NULL) {
1645 			accel_sequence_put(seq);
1646 		}
1647 
1648 		return -ENOMEM;
1649 	}
1650 
1651 	task->dst_domain = dst_domain;
1652 	task->dst_domain_ctx = dst_domain_ctx;
1653 	task->d.iovs = dst_iovs;
1654 	task->d.iovcnt = dst_iovcnt;
1655 	task->src_domain = src_domain;
1656 	task->src_domain_ctx = src_domain_ctx;
1657 	task->s.iovs = src_iovs;
1658 	task->s.iovcnt = src_iovcnt;
1659 	task->dif.ctx = ctx;
1660 	task->dif.num_blocks = num_blocks;
1661 	task->nbytes = num_blocks * ctx->block_size;
1662 	task->op_code = SPDK_ACCEL_OPC_DIF_GENERATE_COPY;
1663 
1664 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1665 	*pseq = seq;
1666 
1667 	return 0;
1668 }
1669 
1670 int
1671 spdk_accel_append_dix_generate(struct spdk_accel_sequence **seq, struct spdk_io_channel *ch,
1672 			       struct iovec *iovs, size_t iovcnt, struct spdk_memory_domain *domain,
1673 			       void *domain_ctx, struct iovec *md_iov,
1674 			       struct spdk_memory_domain *md_domain, void *md_domain_ctx,
1675 			       uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
1676 			       spdk_accel_step_cb cb_fn, void *cb_arg)
1677 {
1678 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1679 	struct spdk_accel_task *task;
1680 	struct spdk_accel_sequence *pseq = *seq;
1681 
1682 	if (pseq == NULL) {
1683 		pseq = accel_sequence_get(accel_ch);
1684 		if (spdk_unlikely(pseq == NULL)) {
1685 			return -ENOMEM;
1686 		}
1687 	}
1688 
1689 	assert(pseq->ch == accel_ch);
1690 	task = accel_sequence_get_task(accel_ch, pseq, cb_fn, cb_arg);
1691 	if (spdk_unlikely(task == NULL)) {
1692 		if (*seq == NULL) {
1693 			accel_sequence_put(pseq);
1694 		}
1695 
1696 		return -ENOMEM;
1697 	}
1698 
1699 	task->d.iovs = md_iov;
1700 	task->d.iovcnt = 1;
1701 	task->dst_domain = md_domain;
1702 	task->dst_domain_ctx = md_domain_ctx;
1703 	task->s.iovs = iovs;
1704 	task->s.iovcnt = iovcnt;
1705 	task->src_domain = domain;
1706 	task->src_domain_ctx = domain_ctx;
1707 	task->dif.ctx = ctx;
1708 	task->dif.num_blocks = num_blocks;
1709 	task->nbytes = num_blocks * ctx->block_size;
1710 	task->op_code = SPDK_ACCEL_OPC_DIX_GENERATE;
1711 
1712 	TAILQ_INSERT_TAIL(&pseq->tasks, task, seq_link);
1713 	*seq = pseq;
1714 
1715 	return 0;
1716 }
1717 
1718 int
1719 spdk_accel_append_dix_verify(struct spdk_accel_sequence **seq, struct spdk_io_channel *ch,
1720 			     struct iovec *iovs, size_t iovcnt, struct spdk_memory_domain *domain,
1721 			     void *domain_ctx, struct iovec *md_iov,
1722 			     struct spdk_memory_domain *md_domain, void *md_domain_ctx,
1723 			     uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
1724 			     struct spdk_dif_error *err, spdk_accel_step_cb cb_fn, void *cb_arg)
1725 {
1726 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1727 	struct spdk_accel_task *task;
1728 	struct spdk_accel_sequence *pseq = *seq;
1729 
1730 	if (pseq == NULL) {
1731 		pseq = accel_sequence_get(accel_ch);
1732 		if (spdk_unlikely(pseq == NULL)) {
1733 			return -ENOMEM;
1734 		}
1735 	}
1736 
1737 	assert(pseq->ch == accel_ch);
1738 	task = accel_sequence_get_task(accel_ch, pseq, cb_fn, cb_arg);
1739 	if (spdk_unlikely(task == NULL)) {
1740 		if (*seq == NULL) {
1741 			accel_sequence_put(pseq);
1742 		}
1743 
1744 		return -ENOMEM;
1745 	}
1746 
1747 	task->d.iovs = md_iov;
1748 	task->d.iovcnt = 1;
1749 	task->dst_domain = md_domain;
1750 	task->dst_domain_ctx = md_domain_ctx;
1751 	task->s.iovs = iovs;
1752 	task->s.iovcnt = iovcnt;
1753 	task->src_domain = domain;
1754 	task->src_domain_ctx = domain_ctx;
1755 	task->dif.ctx = ctx;
1756 	task->dif.err = err;
1757 	task->dif.num_blocks = num_blocks;
1758 	task->nbytes = num_blocks * ctx->block_size;
1759 	task->op_code = SPDK_ACCEL_OPC_DIX_VERIFY;
1760 
1761 	TAILQ_INSERT_TAIL(&pseq->tasks, task, seq_link);
1762 	*seq = pseq;
1763 
1764 	return 0;
1765 }
1766 
1767 int
1768 spdk_accel_get_buf(struct spdk_io_channel *ch, uint64_t len, void **buf,
1769 		   struct spdk_memory_domain **domain, void **domain_ctx)
1770 {
1771 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1772 	struct accel_buffer *accel_buf;
1773 
1774 	accel_buf = accel_get_buf(accel_ch, len);
1775 	if (spdk_unlikely(accel_buf == NULL)) {
1776 		return -ENOMEM;
1777 	}
1778 
1779 	accel_buf->ch = accel_ch;
1780 
1781 	/* We always return the same pointer and identify the buffers through domain_ctx */
1782 	*buf = ACCEL_BUFFER_BASE;
1783 	*domain_ctx = accel_buf;
1784 	*domain = g_accel_domain;
1785 
1786 	return 0;
1787 }
1788 
1789 void
1790 spdk_accel_put_buf(struct spdk_io_channel *ch, void *buf,
1791 		   struct spdk_memory_domain *domain, void *domain_ctx)
1792 {
1793 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1794 	struct accel_buffer *accel_buf = domain_ctx;
1795 
1796 	assert(domain == g_accel_domain);
1797 	assert(buf == ACCEL_BUFFER_BASE);
1798 
1799 	accel_put_buf(accel_ch, accel_buf);
1800 }
1801 
1802 static void
1803 accel_sequence_complete_task(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1804 {
1805 	struct accel_io_channel *ch = seq->ch;
1806 	spdk_accel_step_cb cb_fn;
1807 	void *cb_arg;
1808 
1809 	TAILQ_REMOVE(&seq->tasks, task, seq_link);
1810 	cb_fn = task->step_cb_fn;
1811 	cb_arg = task->cb_arg;
1812 	task->seq = NULL;
1813 	if (task->has_aux) {
1814 		SLIST_INSERT_HEAD(&ch->task_aux_data_pool, task->aux, link);
1815 		task->aux = NULL;
1816 		task->has_aux = false;
1817 	}
1818 
1819 	_put_task(ch, task);
1820 
1821 	if (cb_fn != NULL) {
1822 		cb_fn(cb_arg);
1823 	}
1824 }
1825 
1826 static void
1827 accel_sequence_complete_tasks(struct spdk_accel_sequence *seq)
1828 {
1829 	struct spdk_accel_task *task;
1830 
1831 	while (!TAILQ_EMPTY(&seq->tasks)) {
1832 		task = TAILQ_FIRST(&seq->tasks);
1833 		accel_sequence_complete_task(seq, task);
1834 	}
1835 }
1836 
1837 static void
1838 accel_sequence_complete(struct spdk_accel_sequence *seq)
1839 {
1840 	spdk_accel_completion_cb cb_fn = seq->cb_fn;
1841 	void *cb_arg = seq->cb_arg;
1842 	int status = seq->status;
1843 
1844 	SPDK_DEBUGLOG(accel, "Completed sequence: %p with status: %d\n", seq, status);
1845 
1846 	accel_update_stats(seq->ch, sequence_executed, 1);
1847 	if (spdk_unlikely(status != 0)) {
1848 		accel_update_stats(seq->ch, sequence_failed, 1);
1849 	}
1850 
1851 	/* First notify all users that appended operations to this sequence */
1852 	accel_sequence_complete_tasks(seq);
1853 	accel_sequence_put(seq);
1854 
1855 	/* Then notify the user that finished the sequence */
1856 	cb_fn(cb_arg, status);
1857 }
1858 
1859 static void
1860 accel_update_virt_iov(struct iovec *diov, struct iovec *siov, struct accel_buffer *accel_buf)
1861 {
1862 	uintptr_t offset;
1863 
1864 	offset = (uintptr_t)siov->iov_base & ACCEL_BUFFER_OFFSET_MASK;
1865 	assert(offset < accel_buf->len);
1866 
1867 	diov->iov_base = (char *)accel_buf->buf + offset;
1868 	diov->iov_len = siov->iov_len;
1869 }
1870 
1871 static void
1872 accel_sequence_set_virtbuf(struct spdk_accel_sequence *seq, struct accel_buffer *buf)
1873 {
1874 	struct spdk_accel_task *task;
1875 	struct iovec *iov;
1876 
1877 	/* Now that we've allocated the actual data buffer for this accel_buffer, update all tasks
1878 	 * in a sequence that were using it.
1879 	 */
1880 	TAILQ_FOREACH(task, &seq->tasks, seq_link) {
1881 		if (task->src_domain == g_accel_domain && task->src_domain_ctx == buf) {
1882 			if (!task->has_aux) {
1883 				task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
1884 				assert(task->aux && "Can't allocate aux data structure");
1885 				task->has_aux = true;
1886 				SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
1887 			}
1888 
1889 			iov = &task->aux->iovs[SPDK_ACCEL_AXU_IOV_VIRT_SRC];
1890 			assert(task->s.iovcnt == 1);
1891 			accel_update_virt_iov(iov, &task->s.iovs[0], buf);
1892 			task->src_domain = NULL;
1893 			task->s.iovs = iov;
1894 		}
1895 		if (task->dst_domain == g_accel_domain && task->dst_domain_ctx == buf) {
1896 			if (!task->has_aux) {
1897 				task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
1898 				assert(task->aux && "Can't allocate aux data structure");
1899 				task->has_aux = true;
1900 				SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
1901 			}
1902 
1903 			iov = &task->aux->iovs[SPDK_ACCEL_AXU_IOV_VIRT_DST];
1904 			assert(task->d.iovcnt == 1);
1905 			accel_update_virt_iov(iov, &task->d.iovs[0], buf);
1906 			task->dst_domain = NULL;
1907 			task->d.iovs = iov;
1908 		}
1909 	}
1910 }
1911 
1912 static void accel_process_sequence(struct spdk_accel_sequence *seq);
1913 
1914 static void
1915 accel_iobuf_get_virtbuf_cb(struct spdk_iobuf_entry *entry, void *buf)
1916 {
1917 	struct accel_buffer *accel_buf;
1918 
1919 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1920 
1921 	assert(accel_buf->seq != NULL);
1922 	assert(accel_buf->buf == NULL);
1923 	accel_buf->buf = buf;
1924 
1925 	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
1926 	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
1927 	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
1928 	accel_process_sequence(accel_buf->seq);
1929 }
1930 
1931 static bool
1932 accel_sequence_alloc_buf(struct spdk_accel_sequence *seq, struct accel_buffer *buf,
1933 			 spdk_iobuf_get_cb cb_fn)
1934 {
1935 	struct accel_io_channel *ch = seq->ch;
1936 
1937 	assert(buf->seq == NULL);
1938 
1939 	buf->seq = seq;
1940 
1941 	/* Buffer might be already allocated by memory domain translation. */
1942 	if (buf->buf) {
1943 		return true;
1944 	}
1945 
1946 	buf->buf = spdk_iobuf_get(&ch->iobuf, buf->len, &buf->iobuf, cb_fn);
1947 	if (spdk_unlikely(buf->buf == NULL)) {
1948 		accel_update_stats(ch, retry.iobuf, 1);
1949 		return false;
1950 	}
1951 
1952 	return true;
1953 }
1954 
1955 static bool
1956 accel_sequence_check_virtbuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1957 {
1958 	/* If a task doesn't have dst/src (e.g. fill, crc32), its dst/src domain should be set to
1959 	 * NULL */
1960 	if (task->src_domain == g_accel_domain) {
1961 		if (!accel_sequence_alloc_buf(seq, task->src_domain_ctx,
1962 					      accel_iobuf_get_virtbuf_cb)) {
1963 			return false;
1964 		}
1965 
1966 		accel_sequence_set_virtbuf(seq, task->src_domain_ctx);
1967 	}
1968 
1969 	if (task->dst_domain == g_accel_domain) {
1970 		if (!accel_sequence_alloc_buf(seq, task->dst_domain_ctx,
1971 					      accel_iobuf_get_virtbuf_cb)) {
1972 			return false;
1973 		}
1974 
1975 		accel_sequence_set_virtbuf(seq, task->dst_domain_ctx);
1976 	}
1977 
1978 	return true;
1979 }
1980 
1981 static void
1982 accel_sequence_get_buf_cb(struct spdk_iobuf_entry *entry, void *buf)
1983 {
1984 	struct accel_buffer *accel_buf;
1985 
1986 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1987 
1988 	assert(accel_buf->seq != NULL);
1989 	assert(accel_buf->buf == NULL);
1990 	accel_buf->buf = buf;
1991 
1992 	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
1993 	accel_buf->cb_fn(accel_buf->seq, accel_buf->cb_ctx);
1994 }
1995 
1996 bool
1997 spdk_accel_alloc_sequence_buf(struct spdk_accel_sequence *seq, void *buf,
1998 			      struct spdk_memory_domain *domain, void *domain_ctx,
1999 			      spdk_accel_sequence_get_buf_cb cb_fn, void *cb_ctx)
2000 {
2001 	struct accel_buffer *accel_buf = domain_ctx;
2002 
2003 	assert(domain == g_accel_domain);
2004 	accel_buf->cb_fn = cb_fn;
2005 	accel_buf->cb_ctx = cb_ctx;
2006 
2007 	if (!accel_sequence_alloc_buf(seq, accel_buf, accel_sequence_get_buf_cb)) {
2008 		return false;
2009 	}
2010 
2011 	accel_sequence_set_virtbuf(seq, accel_buf);
2012 
2013 	return true;
2014 }
2015 
2016 struct spdk_accel_task *
2017 spdk_accel_sequence_first_task(struct spdk_accel_sequence *seq)
2018 {
2019 	return TAILQ_FIRST(&seq->tasks);
2020 }
2021 
2022 struct spdk_accel_task *
2023 spdk_accel_sequence_next_task(struct spdk_accel_task *task)
2024 {
2025 	return TAILQ_NEXT(task, seq_link);
2026 }
2027 
2028 static inline void
2029 accel_set_bounce_buffer(struct spdk_accel_bounce_buffer *bounce, struct iovec **iovs,
2030 			uint32_t *iovcnt, struct spdk_memory_domain **domain, void **domain_ctx,
2031 			struct accel_buffer *buf)
2032 {
2033 	bounce->orig_iovs = *iovs;
2034 	bounce->orig_iovcnt = *iovcnt;
2035 	bounce->orig_domain = *domain;
2036 	bounce->orig_domain_ctx = *domain_ctx;
2037 	bounce->iov.iov_base = buf->buf;
2038 	bounce->iov.iov_len = buf->len;
2039 
2040 	*iovs = &bounce->iov;
2041 	*iovcnt = 1;
2042 	*domain = NULL;
2043 }
2044 
2045 static void
2046 accel_iobuf_get_src_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
2047 {
2048 	struct spdk_accel_task *task;
2049 	struct accel_buffer *accel_buf;
2050 
2051 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
2052 	assert(accel_buf->buf == NULL);
2053 	accel_buf->buf = buf;
2054 
2055 	task = TAILQ_FIRST(&accel_buf->seq->tasks);
2056 	assert(task != NULL);
2057 
2058 	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
2059 	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
2060 	assert(task->aux);
2061 	assert(task->has_aux);
2062 	accel_set_bounce_buffer(&task->aux->bounce.s, &task->s.iovs, &task->s.iovcnt, &task->src_domain,
2063 				&task->src_domain_ctx, accel_buf);
2064 	accel_process_sequence(accel_buf->seq);
2065 }
2066 
2067 static void
2068 accel_iobuf_get_dst_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
2069 {
2070 	struct spdk_accel_task *task;
2071 	struct accel_buffer *accel_buf;
2072 
2073 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
2074 	assert(accel_buf->buf == NULL);
2075 	accel_buf->buf = buf;
2076 
2077 	task = TAILQ_FIRST(&accel_buf->seq->tasks);
2078 	assert(task != NULL);
2079 
2080 	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
2081 	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
2082 	assert(task->aux);
2083 	assert(task->has_aux);
2084 	accel_set_bounce_buffer(&task->aux->bounce.d, &task->d.iovs, &task->d.iovcnt, &task->dst_domain,
2085 				&task->dst_domain_ctx, accel_buf);
2086 	accel_process_sequence(accel_buf->seq);
2087 }
2088 
2089 static int
2090 accel_sequence_check_bouncebuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
2091 {
2092 	struct accel_buffer *buf;
2093 
2094 	if (task->src_domain != NULL) {
2095 		/* By the time we're here, accel buffers should have been allocated */
2096 		assert(task->src_domain != g_accel_domain);
2097 
2098 		if (!task->has_aux) {
2099 			task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
2100 			if (spdk_unlikely(!task->aux)) {
2101 				SPDK_ERRLOG("Can't allocate aux data structure\n");
2102 				assert(0);
2103 				return -EAGAIN;
2104 			}
2105 			task->has_aux = true;
2106 			SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
2107 		}
2108 		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->s.iovs, task->s.iovcnt));
2109 		if (buf == NULL) {
2110 			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
2111 			return -ENOMEM;
2112 		}
2113 
2114 		SLIST_INSERT_HEAD(&seq->bounce_bufs, buf, link);
2115 		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_src_bounce_cb)) {
2116 			return -EAGAIN;
2117 		}
2118 
2119 		accel_set_bounce_buffer(&task->aux->bounce.s, &task->s.iovs, &task->s.iovcnt,
2120 					&task->src_domain, &task->src_domain_ctx, buf);
2121 	}
2122 
2123 	if (task->dst_domain != NULL) {
2124 		/* By the time we're here, accel buffers should have been allocated */
2125 		assert(task->dst_domain != g_accel_domain);
2126 
2127 		if (!task->has_aux) {
2128 			task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
2129 			if (spdk_unlikely(!task->aux)) {
2130 				SPDK_ERRLOG("Can't allocate aux data structure\n");
2131 				assert(0);
2132 				return -EAGAIN;
2133 			}
2134 			task->has_aux = true;
2135 			SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
2136 		}
2137 		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->d.iovs, task->d.iovcnt));
2138 		if (buf == NULL) {
2139 			/* The src buffer will be released when a sequence is completed */
2140 			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
2141 			return -ENOMEM;
2142 		}
2143 
2144 		SLIST_INSERT_HEAD(&seq->bounce_bufs, buf, link);
2145 		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_dst_bounce_cb)) {
2146 			return -EAGAIN;
2147 		}
2148 
2149 		accel_set_bounce_buffer(&task->aux->bounce.d, &task->d.iovs, &task->d.iovcnt,
2150 					&task->dst_domain, &task->dst_domain_ctx, buf);
2151 	}
2152 
2153 	return 0;
2154 }
2155 
2156 static void
2157 accel_task_pull_data_cb(void *ctx, int status)
2158 {
2159 	struct spdk_accel_sequence *seq = ctx;
2160 
2161 	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
2162 	if (spdk_likely(status == 0)) {
2163 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
2164 	} else {
2165 		accel_sequence_set_fail(seq, status);
2166 	}
2167 
2168 	accel_process_sequence(seq);
2169 }
2170 
2171 static void
2172 accel_task_pull_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
2173 {
2174 	int rc;
2175 
2176 	assert(task->has_aux);
2177 	assert(task->aux);
2178 	assert(task->aux->bounce.s.orig_iovs != NULL);
2179 	assert(task->aux->bounce.s.orig_domain != NULL);
2180 	assert(task->aux->bounce.s.orig_domain != g_accel_domain);
2181 	assert(!g_modules_opc[task->op_code].supports_memory_domains);
2182 
2183 	rc = spdk_memory_domain_pull_data(task->aux->bounce.s.orig_domain,
2184 					  task->aux->bounce.s.orig_domain_ctx,
2185 					  task->aux->bounce.s.orig_iovs, task->aux->bounce.s.orig_iovcnt,
2186 					  task->s.iovs, task->s.iovcnt,
2187 					  accel_task_pull_data_cb, seq);
2188 	if (spdk_unlikely(rc != 0)) {
2189 		SPDK_ERRLOG("Failed to pull data from memory domain: %s, rc: %d\n",
2190 			    spdk_memory_domain_get_dma_device_id(task->aux->bounce.s.orig_domain), rc);
2191 		accel_sequence_set_fail(seq, rc);
2192 	}
2193 }
2194 
2195 static void
2196 accel_task_push_data_cb(void *ctx, int status)
2197 {
2198 	struct spdk_accel_sequence *seq = ctx;
2199 
2200 	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
2201 	if (spdk_likely(status == 0)) {
2202 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
2203 	} else {
2204 		accel_sequence_set_fail(seq, status);
2205 	}
2206 
2207 	accel_process_sequence(seq);
2208 }
2209 
2210 static void
2211 accel_task_push_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
2212 {
2213 	int rc;
2214 
2215 	assert(task->has_aux);
2216 	assert(task->aux);
2217 	assert(task->aux->bounce.d.orig_iovs != NULL);
2218 	assert(task->aux->bounce.d.orig_domain != NULL);
2219 	assert(task->aux->bounce.d.orig_domain != g_accel_domain);
2220 	assert(!g_modules_opc[task->op_code].supports_memory_domains);
2221 
2222 	rc = spdk_memory_domain_push_data(task->aux->bounce.d.orig_domain,
2223 					  task->aux->bounce.d.orig_domain_ctx,
2224 					  task->aux->bounce.d.orig_iovs, task->aux->bounce.d.orig_iovcnt,
2225 					  task->d.iovs, task->d.iovcnt,
2226 					  accel_task_push_data_cb, seq);
2227 	if (spdk_unlikely(rc != 0)) {
2228 		SPDK_ERRLOG("Failed to push data to memory domain: %s, rc: %d\n",
2229 			    spdk_memory_domain_get_dma_device_id(task->aux->bounce.s.orig_domain), rc);
2230 		accel_sequence_set_fail(seq, rc);
2231 	}
2232 }
2233 
2234 static void
2235 accel_process_sequence(struct spdk_accel_sequence *seq)
2236 {
2237 	struct accel_io_channel *accel_ch = seq->ch;
2238 	struct spdk_accel_task *task;
2239 	enum accel_sequence_state state;
2240 	int rc;
2241 
2242 	/* Prevent recursive calls to this function */
2243 	if (spdk_unlikely(seq->in_process_sequence)) {
2244 		return;
2245 	}
2246 	seq->in_process_sequence = true;
2247 
2248 	task = TAILQ_FIRST(&seq->tasks);
2249 	do {
2250 		state = seq->state;
2251 		switch (state) {
2252 		case ACCEL_SEQUENCE_STATE_INIT:
2253 			if (g_accel_driver != NULL) {
2254 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS);
2255 				break;
2256 			}
2257 		/* Fall through */
2258 		case ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF:
2259 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
2260 			if (!accel_sequence_check_virtbuf(seq, task)) {
2261 				/* We couldn't allocate a buffer, wait until one is available */
2262 				break;
2263 			}
2264 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
2265 		/* Fall through */
2266 		case ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF:
2267 			/* If a module supports memory domains, we don't need to allocate bounce
2268 			 * buffers */
2269 			if (g_modules_opc[task->op_code].supports_memory_domains) {
2270 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
2271 				break;
2272 			}
2273 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
2274 			rc = accel_sequence_check_bouncebuf(seq, task);
2275 			if (spdk_unlikely(rc != 0)) {
2276 				/* We couldn't allocate a buffer, wait until one is available */
2277 				if (rc == -EAGAIN) {
2278 					break;
2279 				}
2280 				accel_sequence_set_fail(seq, rc);
2281 				break;
2282 			}
2283 			if (task->has_aux && task->s.iovs == &task->aux->bounce.s.iov) {
2284 				assert(task->aux->bounce.s.orig_iovs);
2285 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PULL_DATA);
2286 				break;
2287 			}
2288 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
2289 		/* Fall through */
2290 		case ACCEL_SEQUENCE_STATE_EXEC_TASK:
2291 			SPDK_DEBUGLOG(accel, "Executing %s operation, sequence: %p\n",
2292 				      g_opcode_strings[task->op_code], seq);
2293 
2294 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_TASK);
2295 			rc = accel_submit_task(accel_ch, task);
2296 			if (spdk_unlikely(rc != 0)) {
2297 				SPDK_ERRLOG("Failed to submit %s operation, sequence: %p\n",
2298 					    g_opcode_strings[task->op_code], seq);
2299 				accel_sequence_set_fail(seq, rc);
2300 			}
2301 			break;
2302 		case ACCEL_SEQUENCE_STATE_PULL_DATA:
2303 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
2304 			accel_task_pull_data(seq, task);
2305 			break;
2306 		case ACCEL_SEQUENCE_STATE_COMPLETE_TASK:
2307 			if (task->has_aux && task->d.iovs == &task->aux->bounce.d.iov) {
2308 				assert(task->aux->bounce.d.orig_iovs);
2309 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PUSH_DATA);
2310 				break;
2311 			}
2312 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
2313 			break;
2314 		case ACCEL_SEQUENCE_STATE_PUSH_DATA:
2315 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
2316 			accel_task_push_data(seq, task);
2317 			break;
2318 		case ACCEL_SEQUENCE_STATE_NEXT_TASK:
2319 			accel_sequence_complete_task(seq, task);
2320 			/* Check if there are any remaining tasks */
2321 			task = TAILQ_FIRST(&seq->tasks);
2322 			if (task == NULL) {
2323 				/* Immediately return here to make sure we don't touch the sequence
2324 				 * after it's completed */
2325 				accel_sequence_complete(seq);
2326 				return;
2327 			}
2328 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_INIT);
2329 			break;
2330 		case ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS:
2331 			assert(!TAILQ_EMPTY(&seq->tasks));
2332 
2333 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS);
2334 			rc = g_accel_driver->execute_sequence(accel_ch->driver_channel, seq);
2335 			if (spdk_unlikely(rc != 0)) {
2336 				SPDK_ERRLOG("Failed to execute sequence: %p using driver: %s\n",
2337 					    seq, g_accel_driver->name);
2338 				accel_sequence_set_fail(seq, rc);
2339 			}
2340 			break;
2341 		case ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS:
2342 			/* Get the task again, as the driver might have completed some tasks
2343 			 * synchronously */
2344 			task = TAILQ_FIRST(&seq->tasks);
2345 			if (task == NULL) {
2346 				/* Immediately return here to make sure we don't touch the sequence
2347 				 * after it's completed */
2348 				accel_sequence_complete(seq);
2349 				return;
2350 			}
2351 			/* We don't want to execute the next task through the driver, so we
2352 			 * explicitly omit the INIT state here */
2353 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
2354 			break;
2355 		case ACCEL_SEQUENCE_STATE_ERROR:
2356 			/* Immediately return here to make sure we don't touch the sequence
2357 			 * after it's completed */
2358 			assert(seq->status != 0);
2359 			accel_sequence_complete(seq);
2360 			return;
2361 		case ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF:
2362 		case ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF:
2363 		case ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA:
2364 		case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
2365 		case ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA:
2366 		case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS:
2367 			break;
2368 		default:
2369 			assert(0 && "bad state");
2370 			break;
2371 		}
2372 	} while (seq->state != state);
2373 
2374 	seq->in_process_sequence = false;
2375 }
2376 
2377 static void
2378 accel_sequence_task_cb(struct spdk_accel_sequence *seq, struct spdk_accel_task *task, int status)
2379 {
2380 	switch (seq->state) {
2381 	case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
2382 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_COMPLETE_TASK);
2383 		if (spdk_unlikely(status != 0)) {
2384 			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p\n",
2385 				    g_opcode_strings[task->op_code], seq);
2386 			accel_sequence_set_fail(seq, status);
2387 		}
2388 
2389 		accel_process_sequence(seq);
2390 		break;
2391 	case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS:
2392 		assert(g_accel_driver != NULL);
2393 		/* Immediately remove the task from the outstanding list to make sure the next call
2394 		 * to spdk_accel_sequence_first_task() doesn't return it */
2395 		accel_sequence_complete_task(seq, task);
2396 		if (spdk_unlikely(status != 0)) {
2397 			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p through "
2398 				    "driver: %s\n", g_opcode_strings[task->op_code], seq,
2399 				    g_accel_driver->name);
2400 			/* Update status without using accel_sequence_set_fail() to avoid changing
2401 			 * seq's state to ERROR until driver calls spdk_accel_sequence_continue() */
2402 			seq->status = status;
2403 		}
2404 		break;
2405 	default:
2406 		assert(0 && "bad state");
2407 		break;
2408 	}
2409 }
2410 
2411 void
2412 spdk_accel_sequence_continue(struct spdk_accel_sequence *seq)
2413 {
2414 	assert(g_accel_driver != NULL);
2415 	assert(seq->state == ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS);
2416 
2417 	if (spdk_likely(seq->status == 0)) {
2418 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS);
2419 	} else {
2420 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
2421 	}
2422 
2423 	accel_process_sequence(seq);
2424 }
2425 
2426 static bool
2427 accel_compare_iovs(struct iovec *iova, uint32_t iovacnt, struct iovec *iovb, uint32_t iovbcnt)
2428 {
2429 	/* For now, just do a dumb check that the iovecs arrays are exactly the same */
2430 	if (iovacnt != iovbcnt) {
2431 		return false;
2432 	}
2433 
2434 	return memcmp(iova, iovb, sizeof(*iova) * iovacnt) == 0;
2435 }
2436 
2437 static bool
2438 accel_task_set_dstbuf(struct spdk_accel_task *task, struct spdk_accel_task *next)
2439 {
2440 	struct spdk_accel_task *prev;
2441 
2442 	switch (task->op_code) {
2443 	case SPDK_ACCEL_OPC_DECOMPRESS:
2444 	case SPDK_ACCEL_OPC_FILL:
2445 	case SPDK_ACCEL_OPC_ENCRYPT:
2446 	case SPDK_ACCEL_OPC_DECRYPT:
2447 	case SPDK_ACCEL_OPC_DIF_GENERATE_COPY:
2448 	case SPDK_ACCEL_OPC_DIF_VERIFY_COPY:
2449 		if (task->dst_domain != next->src_domain) {
2450 			return false;
2451 		}
2452 		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
2453 					next->s.iovs, next->s.iovcnt)) {
2454 			return false;
2455 		}
2456 		task->d.iovs = next->d.iovs;
2457 		task->d.iovcnt = next->d.iovcnt;
2458 		task->dst_domain = next->dst_domain;
2459 		task->dst_domain_ctx = next->dst_domain_ctx;
2460 		break;
2461 	case SPDK_ACCEL_OPC_CRC32C:
2462 	case SPDK_ACCEL_OPC_DIX_GENERATE:
2463 	case SPDK_ACCEL_OPC_DIX_VERIFY:
2464 		/* crc32 and dix_generate/verify are special, because they do not have a dst buffer */
2465 		if (task->src_domain != next->src_domain) {
2466 			return false;
2467 		}
2468 		if (!accel_compare_iovs(task->s.iovs, task->s.iovcnt,
2469 					next->s.iovs, next->s.iovcnt)) {
2470 			return false;
2471 		}
2472 		/* We can only change operation's buffer if we can change previous task's buffer */
2473 		prev = TAILQ_PREV(task, accel_sequence_tasks, seq_link);
2474 		if (prev == NULL) {
2475 			return false;
2476 		}
2477 		if (!accel_task_set_dstbuf(prev, next)) {
2478 			return false;
2479 		}
2480 		task->s.iovs = next->d.iovs;
2481 		task->s.iovcnt = next->d.iovcnt;
2482 		task->src_domain = next->dst_domain;
2483 		task->src_domain_ctx = next->dst_domain_ctx;
2484 		break;
2485 	default:
2486 		return false;
2487 	}
2488 
2489 	return true;
2490 }
2491 
2492 static void
2493 accel_sequence_merge_tasks(struct spdk_accel_sequence *seq, struct spdk_accel_task *task,
2494 			   struct spdk_accel_task **next_task)
2495 {
2496 	struct spdk_accel_task *next = *next_task;
2497 
2498 	switch (task->op_code) {
2499 	case SPDK_ACCEL_OPC_COPY:
2500 		/* We only allow changing src of operations that actually have a src, e.g. we never
2501 		 * do it for fill.  Theoretically, it is possible, but we'd have to be careful to
2502 		 * change the src of the operation after fill (which in turn could also be a fill).
2503 		 * So, for the sake of simplicity, skip this type of operations for now.
2504 		 */
2505 		if (next->op_code != SPDK_ACCEL_OPC_DECOMPRESS &&
2506 		    next->op_code != SPDK_ACCEL_OPC_COPY &&
2507 		    next->op_code != SPDK_ACCEL_OPC_ENCRYPT &&
2508 		    next->op_code != SPDK_ACCEL_OPC_DECRYPT &&
2509 		    next->op_code != SPDK_ACCEL_OPC_COPY_CRC32C &&
2510 		    next->op_code != SPDK_ACCEL_OPC_DIF_GENERATE_COPY &&
2511 		    next->op_code != SPDK_ACCEL_OPC_DIF_VERIFY_COPY) {
2512 			break;
2513 		}
2514 		if (task->dst_domain != next->src_domain) {
2515 			break;
2516 		}
2517 		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
2518 					next->s.iovs, next->s.iovcnt)) {
2519 			break;
2520 		}
2521 		next->s.iovs = task->s.iovs;
2522 		next->s.iovcnt = task->s.iovcnt;
2523 		next->src_domain = task->src_domain;
2524 		next->src_domain_ctx = task->src_domain_ctx;
2525 		accel_sequence_complete_task(seq, task);
2526 		break;
2527 	case SPDK_ACCEL_OPC_DECOMPRESS:
2528 	case SPDK_ACCEL_OPC_FILL:
2529 	case SPDK_ACCEL_OPC_ENCRYPT:
2530 	case SPDK_ACCEL_OPC_DECRYPT:
2531 	case SPDK_ACCEL_OPC_CRC32C:
2532 	case SPDK_ACCEL_OPC_DIF_GENERATE_COPY:
2533 	case SPDK_ACCEL_OPC_DIF_VERIFY_COPY:
2534 	case SPDK_ACCEL_OPC_DIX_GENERATE:
2535 	case SPDK_ACCEL_OPC_DIX_VERIFY:
2536 		/* We can only merge tasks when one of them is a copy */
2537 		if (next->op_code != SPDK_ACCEL_OPC_COPY) {
2538 			break;
2539 		}
2540 		if (!accel_task_set_dstbuf(task, next)) {
2541 			break;
2542 		}
2543 		/* We're removing next_task from the tasks queue, so we need to update its pointer,
2544 		 * so that the TAILQ_FOREACH_SAFE() loop below works correctly */
2545 		*next_task = TAILQ_NEXT(next, seq_link);
2546 		accel_sequence_complete_task(seq, next);
2547 		break;
2548 	default:
2549 		assert(0 && "bad opcode");
2550 		break;
2551 	}
2552 }
2553 
2554 void
2555 spdk_accel_sequence_finish(struct spdk_accel_sequence *seq,
2556 			   spdk_accel_completion_cb cb_fn, void *cb_arg)
2557 {
2558 	struct spdk_accel_task *task, *next;
2559 
2560 	/* Try to remove any copy operations if possible */
2561 	TAILQ_FOREACH_SAFE(task, &seq->tasks, seq_link, next) {
2562 		if (next == NULL) {
2563 			break;
2564 		}
2565 		accel_sequence_merge_tasks(seq, task, &next);
2566 	}
2567 
2568 	seq->cb_fn = cb_fn;
2569 	seq->cb_arg = cb_arg;
2570 
2571 	accel_process_sequence(seq);
2572 }
2573 
2574 void
2575 spdk_accel_sequence_reverse(struct spdk_accel_sequence *seq)
2576 {
2577 	struct accel_sequence_tasks tasks = TAILQ_HEAD_INITIALIZER(tasks);
2578 	struct spdk_accel_task *task;
2579 
2580 	TAILQ_SWAP(&tasks, &seq->tasks, spdk_accel_task, seq_link);
2581 
2582 	while (!TAILQ_EMPTY(&tasks)) {
2583 		task = TAILQ_FIRST(&tasks);
2584 		TAILQ_REMOVE(&tasks, task, seq_link);
2585 		TAILQ_INSERT_HEAD(&seq->tasks, task, seq_link);
2586 	}
2587 }
2588 
2589 void
2590 spdk_accel_sequence_abort(struct spdk_accel_sequence *seq)
2591 {
2592 	if (seq == NULL) {
2593 		return;
2594 	}
2595 
2596 	accel_sequence_complete_tasks(seq);
2597 	accel_sequence_put(seq);
2598 }
2599 
2600 struct spdk_memory_domain *
2601 spdk_accel_get_memory_domain(void)
2602 {
2603 	return g_accel_domain;
2604 }
2605 
2606 static struct spdk_accel_module_if *
2607 _module_find_by_name(const char *name)
2608 {
2609 	struct spdk_accel_module_if *accel_module = NULL;
2610 
2611 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
2612 		if (strcmp(name, accel_module->name) == 0) {
2613 			break;
2614 		}
2615 	}
2616 
2617 	return accel_module;
2618 }
2619 
2620 static inline struct spdk_accel_crypto_key *
2621 _accel_crypto_key_get(const char *name)
2622 {
2623 	struct spdk_accel_crypto_key *key;
2624 
2625 	assert(spdk_spin_held(&g_keyring_spin));
2626 
2627 	TAILQ_FOREACH(key, &g_keyring, link) {
2628 		if (strcmp(name, key->param.key_name) == 0) {
2629 			return key;
2630 		}
2631 	}
2632 
2633 	return NULL;
2634 }
2635 
2636 static void
2637 accel_crypto_key_free_mem(struct spdk_accel_crypto_key *key)
2638 {
2639 	if (key->param.hex_key) {
2640 		spdk_memset_s(key->param.hex_key, key->key_size * 2, 0, key->key_size * 2);
2641 		free(key->param.hex_key);
2642 	}
2643 	if (key->param.hex_key2) {
2644 		spdk_memset_s(key->param.hex_key2, key->key2_size * 2, 0, key->key2_size * 2);
2645 		free(key->param.hex_key2);
2646 	}
2647 	free(key->param.tweak_mode);
2648 	free(key->param.key_name);
2649 	free(key->param.cipher);
2650 	if (key->key) {
2651 		spdk_memset_s(key->key, key->key_size, 0, key->key_size);
2652 		free(key->key);
2653 	}
2654 	if (key->key2) {
2655 		spdk_memset_s(key->key2, key->key2_size, 0, key->key2_size);
2656 		free(key->key2);
2657 	}
2658 	free(key);
2659 }
2660 
2661 static void
2662 accel_crypto_key_destroy_unsafe(struct spdk_accel_crypto_key *key)
2663 {
2664 	assert(key->module_if);
2665 	assert(key->module_if->crypto_key_deinit);
2666 
2667 	key->module_if->crypto_key_deinit(key);
2668 	accel_crypto_key_free_mem(key);
2669 }
2670 
2671 /*
2672  * This function mitigates a timing side channel which could be caused by using strcmp()
2673  * Please refer to chapter "Mitigating Information Leakage Based on Variable Timing" in
2674  * the article [1] for more details
2675  * [1] https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/secure-coding/mitigate-timing-side-channel-crypto-implementation.html
2676  */
2677 static bool
2678 accel_aes_xts_keys_equal(const char *k1, size_t k1_len, const char *k2, size_t k2_len)
2679 {
2680 	size_t i;
2681 	volatile size_t x = k1_len ^ k2_len;
2682 
2683 	for (i = 0; ((i < k1_len) & (i < k2_len)); i++) {
2684 		x |= k1[i] ^ k2[i];
2685 	}
2686 
2687 	return x == 0;
2688 }
2689 
2690 static const char *g_tweak_modes[] = {
2691 	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA] = "SIMPLE_LBA",
2692 	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_JOIN_NEG_LBA_WITH_LBA] = "JOIN_NEG_LBA_WITH_LBA",
2693 	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_INCR_512_FULL_LBA] = "INCR_512_FULL_LBA",
2694 	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_INCR_512_UPPER_LBA] = "INCR_512_UPPER_LBA",
2695 };
2696 
2697 static const char *g_ciphers[] = {
2698 	[SPDK_ACCEL_CIPHER_AES_CBC] = "AES_CBC",
2699 	[SPDK_ACCEL_CIPHER_AES_XTS] = "AES_XTS",
2700 };
2701 
2702 int
2703 spdk_accel_crypto_key_create(const struct spdk_accel_crypto_key_create_param *param)
2704 {
2705 	struct spdk_accel_module_if *module;
2706 	struct spdk_accel_crypto_key *key;
2707 	size_t hex_key_size, hex_key2_size;
2708 	bool found = false;
2709 	size_t i;
2710 	int rc;
2711 
2712 	if (!param || !param->hex_key || !param->cipher || !param->key_name) {
2713 		return -EINVAL;
2714 	}
2715 
2716 	if (g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module != g_modules_opc[SPDK_ACCEL_OPC_DECRYPT].module) {
2717 		/* hardly ever possible, but let's check and warn the user */
2718 		SPDK_ERRLOG("Different accel modules are used for encryption and decryption\n");
2719 	}
2720 	module = g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module;
2721 
2722 	if (!module) {
2723 		SPDK_ERRLOG("No accel module found assigned for crypto operation\n");
2724 		return -ENOENT;
2725 	}
2726 
2727 	if (!module->crypto_key_init || !module->crypto_supports_cipher) {
2728 		SPDK_ERRLOG("Module %s doesn't support crypto operations\n", module->name);
2729 		return -ENOTSUP;
2730 	}
2731 
2732 	key = calloc(1, sizeof(*key));
2733 	if (!key) {
2734 		return -ENOMEM;
2735 	}
2736 
2737 	key->param.key_name = strdup(param->key_name);
2738 	if (!key->param.key_name) {
2739 		rc = -ENOMEM;
2740 		goto error;
2741 	}
2742 
2743 	for (i = 0; i < SPDK_COUNTOF(g_ciphers); ++i) {
2744 		assert(g_ciphers[i]);
2745 
2746 		if (strncmp(param->cipher, g_ciphers[i], strlen(g_ciphers[i])) == 0) {
2747 			key->cipher = i;
2748 			found = true;
2749 			break;
2750 		}
2751 	}
2752 
2753 	if (!found) {
2754 		SPDK_ERRLOG("Failed to parse cipher\n");
2755 		rc = -EINVAL;
2756 		goto error;
2757 	}
2758 
2759 	key->param.cipher = strdup(param->cipher);
2760 	if (!key->param.cipher) {
2761 		rc = -ENOMEM;
2762 		goto error;
2763 	}
2764 
2765 	hex_key_size = strnlen(param->hex_key, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
2766 	if (hex_key_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
2767 		SPDK_ERRLOG("key1 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
2768 		rc = -EINVAL;
2769 		goto error;
2770 	}
2771 
2772 	if (hex_key_size == 0) {
2773 		SPDK_ERRLOG("key1 size cannot be 0\n");
2774 		rc = -EINVAL;
2775 		goto error;
2776 	}
2777 
2778 	key->param.hex_key = strdup(param->hex_key);
2779 	if (!key->param.hex_key) {
2780 		rc = -ENOMEM;
2781 		goto error;
2782 	}
2783 
2784 	key->key_size = hex_key_size / 2;
2785 	key->key = spdk_unhexlify(key->param.hex_key);
2786 	if (!key->key) {
2787 		SPDK_ERRLOG("Failed to unhexlify key1\n");
2788 		rc = -EINVAL;
2789 		goto error;
2790 	}
2791 
2792 	if (param->hex_key2) {
2793 		hex_key2_size = strnlen(param->hex_key2, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
2794 		if (hex_key2_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
2795 			SPDK_ERRLOG("key2 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
2796 			rc = -EINVAL;
2797 			goto error;
2798 		}
2799 
2800 		if (hex_key2_size == 0) {
2801 			SPDK_ERRLOG("key2 size cannot be 0\n");
2802 			rc = -EINVAL;
2803 			goto error;
2804 		}
2805 
2806 		key->param.hex_key2 = strdup(param->hex_key2);
2807 		if (!key->param.hex_key2) {
2808 			rc = -ENOMEM;
2809 			goto error;
2810 		}
2811 
2812 		key->key2_size = hex_key2_size / 2;
2813 		key->key2 = spdk_unhexlify(key->param.hex_key2);
2814 		if (!key->key2) {
2815 			SPDK_ERRLOG("Failed to unhexlify key2\n");
2816 			rc = -EINVAL;
2817 			goto error;
2818 		}
2819 	}
2820 
2821 	key->tweak_mode = ACCEL_CRYPTO_TWEAK_MODE_DEFAULT;
2822 	if (param->tweak_mode) {
2823 		found = false;
2824 
2825 		key->param.tweak_mode = strdup(param->tweak_mode);
2826 		if (!key->param.tweak_mode) {
2827 			rc = -ENOMEM;
2828 			goto error;
2829 		}
2830 
2831 		for (i = 0; i < SPDK_COUNTOF(g_tweak_modes); ++i) {
2832 			assert(g_tweak_modes[i]);
2833 
2834 			if (strncmp(param->tweak_mode, g_tweak_modes[i], strlen(g_tweak_modes[i])) == 0) {
2835 				key->tweak_mode = i;
2836 				found = true;
2837 				break;
2838 			}
2839 		}
2840 
2841 		if (!found) {
2842 			SPDK_ERRLOG("Failed to parse tweak mode\n");
2843 			rc = -EINVAL;
2844 			goto error;
2845 		}
2846 	}
2847 
2848 	if ((!module->crypto_supports_tweak_mode && key->tweak_mode != ACCEL_CRYPTO_TWEAK_MODE_DEFAULT) ||
2849 	    (module->crypto_supports_tweak_mode && !module->crypto_supports_tweak_mode(key->tweak_mode))) {
2850 		SPDK_ERRLOG("Module %s doesn't support %s tweak mode\n", module->name,
2851 			    g_tweak_modes[key->tweak_mode]);
2852 		rc = -EINVAL;
2853 		goto error;
2854 	}
2855 
2856 	if (!module->crypto_supports_cipher(key->cipher, key->key_size)) {
2857 		SPDK_ERRLOG("Module %s doesn't support %s cipher with %zu key size\n", module->name,
2858 			    g_ciphers[key->cipher], key->key_size);
2859 		rc = -EINVAL;
2860 		goto error;
2861 	}
2862 
2863 	if (key->cipher == SPDK_ACCEL_CIPHER_AES_XTS) {
2864 		if (!key->key2) {
2865 			SPDK_ERRLOG("%s key2 is missing\n", g_ciphers[key->cipher]);
2866 			rc = -EINVAL;
2867 			goto error;
2868 		}
2869 
2870 		if (key->key_size != key->key2_size) {
2871 			SPDK_ERRLOG("%s key size %zu is not equal to key2 size %zu\n", g_ciphers[key->cipher],
2872 				    key->key_size,
2873 				    key->key2_size);
2874 			rc = -EINVAL;
2875 			goto error;
2876 		}
2877 
2878 		if (accel_aes_xts_keys_equal(key->key, key->key_size, key->key2, key->key2_size)) {
2879 			SPDK_ERRLOG("%s identical keys are not secure\n", g_ciphers[key->cipher]);
2880 			rc = -EINVAL;
2881 			goto error;
2882 		}
2883 	}
2884 
2885 	if (key->cipher == SPDK_ACCEL_CIPHER_AES_CBC) {
2886 		if (key->key2_size) {
2887 			SPDK_ERRLOG("%s doesn't use key2\n", g_ciphers[key->cipher]);
2888 			rc = -EINVAL;
2889 			goto error;
2890 		}
2891 	}
2892 
2893 	key->module_if = module;
2894 
2895 	spdk_spin_lock(&g_keyring_spin);
2896 	if (_accel_crypto_key_get(param->key_name)) {
2897 		rc = -EEXIST;
2898 	} else {
2899 		rc = module->crypto_key_init(key);
2900 		if (rc) {
2901 			SPDK_ERRLOG("Module %s failed to initialize crypto key\n", module->name);
2902 		} else {
2903 			TAILQ_INSERT_TAIL(&g_keyring, key, link);
2904 		}
2905 	}
2906 	spdk_spin_unlock(&g_keyring_spin);
2907 
2908 	if (rc) {
2909 		goto error;
2910 	}
2911 
2912 	return 0;
2913 
2914 error:
2915 	accel_crypto_key_free_mem(key);
2916 	return rc;
2917 }
2918 
2919 int
2920 spdk_accel_crypto_key_destroy(struct spdk_accel_crypto_key *key)
2921 {
2922 	if (!key || !key->module_if) {
2923 		return -EINVAL;
2924 	}
2925 
2926 	spdk_spin_lock(&g_keyring_spin);
2927 	if (!_accel_crypto_key_get(key->param.key_name)) {
2928 		spdk_spin_unlock(&g_keyring_spin);
2929 		return -ENOENT;
2930 	}
2931 	TAILQ_REMOVE(&g_keyring, key, link);
2932 	spdk_spin_unlock(&g_keyring_spin);
2933 
2934 	accel_crypto_key_destroy_unsafe(key);
2935 
2936 	return 0;
2937 }
2938 
2939 struct spdk_accel_crypto_key *
2940 spdk_accel_crypto_key_get(const char *name)
2941 {
2942 	struct spdk_accel_crypto_key *key;
2943 
2944 	spdk_spin_lock(&g_keyring_spin);
2945 	key = _accel_crypto_key_get(name);
2946 	spdk_spin_unlock(&g_keyring_spin);
2947 
2948 	return key;
2949 }
2950 
2951 /* Helper function when accel modules register with the framework. */
2952 void
2953 spdk_accel_module_list_add(struct spdk_accel_module_if *accel_module)
2954 {
2955 	struct spdk_accel_module_if *tmp;
2956 
2957 	if (_module_find_by_name(accel_module->name)) {
2958 		SPDK_NOTICELOG("Module %s already registered\n", accel_module->name);
2959 		assert(false);
2960 		return;
2961 	}
2962 
2963 	TAILQ_FOREACH(tmp, &spdk_accel_module_list, tailq) {
2964 		if (accel_module->priority < tmp->priority) {
2965 			break;
2966 		}
2967 	}
2968 
2969 	if (tmp != NULL) {
2970 		TAILQ_INSERT_BEFORE(tmp, accel_module, tailq);
2971 	} else {
2972 		TAILQ_INSERT_TAIL(&spdk_accel_module_list, accel_module, tailq);
2973 	}
2974 }
2975 
2976 /* Framework level channel create callback. */
2977 static int
2978 accel_create_channel(void *io_device, void *ctx_buf)
2979 {
2980 	struct accel_io_channel	*accel_ch = ctx_buf;
2981 	struct spdk_accel_task *accel_task;
2982 	struct spdk_accel_task_aux_data *accel_task_aux;
2983 	struct spdk_accel_sequence *seq;
2984 	struct accel_buffer *buf;
2985 	size_t task_size_aligned;
2986 	uint8_t *task_mem;
2987 	uint32_t i = 0, j;
2988 	int rc;
2989 
2990 	task_size_aligned = SPDK_ALIGN_CEIL(g_max_accel_module_size, SPDK_CACHE_LINE_SIZE);
2991 	accel_ch->task_pool_base = aligned_alloc(SPDK_CACHE_LINE_SIZE,
2992 				   g_opts.task_count * task_size_aligned);
2993 	if (!accel_ch->task_pool_base) {
2994 		return -ENOMEM;
2995 	}
2996 	memset(accel_ch->task_pool_base, 0, g_opts.task_count * task_size_aligned);
2997 
2998 	accel_ch->seq_pool_base = aligned_alloc(SPDK_CACHE_LINE_SIZE,
2999 						g_opts.sequence_count * sizeof(struct spdk_accel_sequence));
3000 	if (accel_ch->seq_pool_base == NULL) {
3001 		goto err;
3002 	}
3003 	memset(accel_ch->seq_pool_base, 0, g_opts.sequence_count * sizeof(struct spdk_accel_sequence));
3004 
3005 	accel_ch->task_aux_data_base = calloc(g_opts.task_count, sizeof(struct spdk_accel_task_aux_data));
3006 	if (accel_ch->task_aux_data_base == NULL) {
3007 		goto err;
3008 	}
3009 
3010 	accel_ch->buf_pool_base = calloc(g_opts.buf_count, sizeof(struct accel_buffer));
3011 	if (accel_ch->buf_pool_base == NULL) {
3012 		goto err;
3013 	}
3014 
3015 	STAILQ_INIT(&accel_ch->task_pool);
3016 	SLIST_INIT(&accel_ch->task_aux_data_pool);
3017 	SLIST_INIT(&accel_ch->seq_pool);
3018 	SLIST_INIT(&accel_ch->buf_pool);
3019 
3020 	task_mem = accel_ch->task_pool_base;
3021 	for (i = 0; i < g_opts.task_count; i++) {
3022 		accel_task = (struct spdk_accel_task *)task_mem;
3023 		accel_task->aux = NULL;
3024 		STAILQ_INSERT_TAIL(&accel_ch->task_pool, accel_task, link);
3025 		task_mem += task_size_aligned;
3026 		accel_task_aux = &accel_ch->task_aux_data_base[i];
3027 		SLIST_INSERT_HEAD(&accel_ch->task_aux_data_pool, accel_task_aux, link);
3028 	}
3029 	for (i = 0; i < g_opts.sequence_count; i++) {
3030 		seq = &accel_ch->seq_pool_base[i];
3031 		SLIST_INSERT_HEAD(&accel_ch->seq_pool, seq, link);
3032 	}
3033 	for (i = 0; i < g_opts.buf_count; i++) {
3034 		buf = &accel_ch->buf_pool_base[i];
3035 		SLIST_INSERT_HEAD(&accel_ch->buf_pool, buf, link);
3036 	}
3037 
3038 	/* Assign modules and get IO channels for each */
3039 	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
3040 		accel_ch->module_ch[i] = g_modules_opc[i].module->get_io_channel();
3041 		/* This can happen if idxd runs out of channels. */
3042 		if (accel_ch->module_ch[i] == NULL) {
3043 			SPDK_ERRLOG("Module %s failed to get io channel\n", g_modules_opc[i].module->name);
3044 			goto err;
3045 		}
3046 	}
3047 
3048 	if (g_accel_driver != NULL) {
3049 		accel_ch->driver_channel = g_accel_driver->get_io_channel();
3050 		if (accel_ch->driver_channel == NULL) {
3051 			SPDK_ERRLOG("Failed to get driver's IO channel\n");
3052 			goto err;
3053 		}
3054 	}
3055 
3056 	rc = spdk_iobuf_channel_init(&accel_ch->iobuf, "accel", g_opts.small_cache_size,
3057 				     g_opts.large_cache_size);
3058 	if (rc != 0) {
3059 		SPDK_ERRLOG("Failed to initialize iobuf accel channel\n");
3060 		goto err;
3061 	}
3062 
3063 	return 0;
3064 err:
3065 	if (accel_ch->driver_channel != NULL) {
3066 		spdk_put_io_channel(accel_ch->driver_channel);
3067 	}
3068 	for (j = 0; j < i; j++) {
3069 		spdk_put_io_channel(accel_ch->module_ch[j]);
3070 	}
3071 	free(accel_ch->task_pool_base);
3072 	free(accel_ch->task_aux_data_base);
3073 	free(accel_ch->seq_pool_base);
3074 	free(accel_ch->buf_pool_base);
3075 
3076 	return -ENOMEM;
3077 }
3078 
3079 static void
3080 accel_add_stats(struct accel_stats *total, struct accel_stats *stats)
3081 {
3082 	int i;
3083 
3084 	total->sequence_executed += stats->sequence_executed;
3085 	total->sequence_failed += stats->sequence_failed;
3086 	total->sequence_outstanding += stats->sequence_outstanding;
3087 	total->task_outstanding += stats->task_outstanding;
3088 	total->retry.task += stats->retry.task;
3089 	total->retry.sequence += stats->retry.sequence;
3090 	total->retry.iobuf += stats->retry.iobuf;
3091 	total->retry.bufdesc += stats->retry.bufdesc;
3092 	for (i = 0; i < SPDK_ACCEL_OPC_LAST; ++i) {
3093 		total->operations[i].executed += stats->operations[i].executed;
3094 		total->operations[i].failed += stats->operations[i].failed;
3095 		total->operations[i].num_bytes += stats->operations[i].num_bytes;
3096 	}
3097 }
3098 
3099 /* Framework level channel destroy callback. */
3100 static void
3101 accel_destroy_channel(void *io_device, void *ctx_buf)
3102 {
3103 	struct accel_io_channel	*accel_ch = ctx_buf;
3104 	int i;
3105 
3106 	spdk_iobuf_channel_fini(&accel_ch->iobuf);
3107 
3108 	if (accel_ch->driver_channel != NULL) {
3109 		spdk_put_io_channel(accel_ch->driver_channel);
3110 	}
3111 
3112 	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
3113 		assert(accel_ch->module_ch[i] != NULL);
3114 		spdk_put_io_channel(accel_ch->module_ch[i]);
3115 		accel_ch->module_ch[i] = NULL;
3116 	}
3117 
3118 	/* Update global stats to make sure channel's stats aren't lost after a channel is gone */
3119 	spdk_spin_lock(&g_stats_lock);
3120 	accel_add_stats(&g_stats, &accel_ch->stats);
3121 	spdk_spin_unlock(&g_stats_lock);
3122 
3123 	free(accel_ch->task_pool_base);
3124 	free(accel_ch->task_aux_data_base);
3125 	free(accel_ch->seq_pool_base);
3126 	free(accel_ch->buf_pool_base);
3127 }
3128 
3129 struct spdk_io_channel *
3130 spdk_accel_get_io_channel(void)
3131 {
3132 	return spdk_get_io_channel(&spdk_accel_module_list);
3133 }
3134 
3135 static int
3136 accel_module_initialize(void)
3137 {
3138 	struct spdk_accel_module_if *accel_module, *tmp_module;
3139 	int rc = 0, module_rc;
3140 
3141 	TAILQ_FOREACH_SAFE(accel_module, &spdk_accel_module_list, tailq, tmp_module) {
3142 		module_rc = accel_module->module_init();
3143 		if (module_rc) {
3144 			TAILQ_REMOVE(&spdk_accel_module_list, accel_module, tailq);
3145 			if (module_rc == -ENODEV) {
3146 				SPDK_NOTICELOG("No devices for module %s, skipping\n", accel_module->name);
3147 			} else if (!rc) {
3148 				SPDK_ERRLOG("Module %s initialization failed with %d\n", accel_module->name, module_rc);
3149 				rc = module_rc;
3150 			}
3151 			continue;
3152 		}
3153 
3154 		SPDK_DEBUGLOG(accel, "Module %s initialized.\n", accel_module->name);
3155 	}
3156 
3157 	return rc;
3158 }
3159 
3160 static void
3161 accel_module_init_opcode(enum spdk_accel_opcode opcode)
3162 {
3163 	struct accel_module *module = &g_modules_opc[opcode];
3164 	struct spdk_accel_module_if *module_if = module->module;
3165 
3166 	if (module_if->get_memory_domains != NULL) {
3167 		module->supports_memory_domains = module_if->get_memory_domains(NULL, 0) > 0;
3168 	}
3169 }
3170 
3171 static int
3172 accel_memory_domain_translate(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
3173 			      struct spdk_memory_domain *dst_domain, struct spdk_memory_domain_translation_ctx *dst_domain_ctx,
3174 			      void *addr, size_t len, struct spdk_memory_domain_translation_result *result)
3175 {
3176 	struct accel_buffer *buf = src_domain_ctx;
3177 
3178 	SPDK_DEBUGLOG(accel, "translate addr %p, len %zu\n", addr, len);
3179 
3180 	assert(g_accel_domain == src_domain);
3181 	assert(spdk_memory_domain_get_system_domain() == dst_domain);
3182 	assert(buf->buf == NULL);
3183 	assert(addr == ACCEL_BUFFER_BASE);
3184 	assert(len == buf->len);
3185 
3186 	buf->buf = spdk_iobuf_get(&buf->ch->iobuf, buf->len, NULL, NULL);
3187 	if (spdk_unlikely(buf->buf == NULL)) {
3188 		return -ENOMEM;
3189 	}
3190 
3191 	result->iov_count = 1;
3192 	result->iov.iov_base = buf->buf;
3193 	result->iov.iov_len = buf->len;
3194 	SPDK_DEBUGLOG(accel, "translated addr %p\n", result->iov.iov_base);
3195 	return 0;
3196 }
3197 
3198 static void
3199 accel_memory_domain_invalidate(struct spdk_memory_domain *domain, void *domain_ctx,
3200 			       struct iovec *iov, uint32_t iovcnt)
3201 {
3202 	struct accel_buffer *buf = domain_ctx;
3203 
3204 	SPDK_DEBUGLOG(accel, "invalidate addr %p, len %zu\n", iov[0].iov_base, iov[0].iov_len);
3205 
3206 	assert(g_accel_domain == domain);
3207 	assert(iovcnt == 1);
3208 	assert(buf->buf != NULL);
3209 	assert(iov[0].iov_base == buf->buf);
3210 	assert(iov[0].iov_len == buf->len);
3211 
3212 	spdk_iobuf_put(&buf->ch->iobuf, buf->buf, buf->len);
3213 	buf->buf = NULL;
3214 }
3215 
3216 int
3217 spdk_accel_initialize(void)
3218 {
3219 	enum spdk_accel_opcode op;
3220 	struct spdk_accel_module_if *accel_module = NULL;
3221 	int rc;
3222 
3223 	/*
3224 	 * We need a unique identifier for the accel framework, so use the
3225 	 * spdk_accel_module_list address for this purpose.
3226 	 */
3227 	spdk_io_device_register(&spdk_accel_module_list, accel_create_channel, accel_destroy_channel,
3228 				sizeof(struct accel_io_channel), "accel");
3229 
3230 	spdk_spin_init(&g_keyring_spin);
3231 	spdk_spin_init(&g_stats_lock);
3232 
3233 	rc = spdk_memory_domain_create(&g_accel_domain, SPDK_DMA_DEVICE_TYPE_ACCEL, NULL,
3234 				       "SPDK_ACCEL_DMA_DEVICE");
3235 	if (rc != 0) {
3236 		SPDK_ERRLOG("Failed to create accel memory domain\n");
3237 		return rc;
3238 	}
3239 
3240 	spdk_memory_domain_set_translation(g_accel_domain, accel_memory_domain_translate);
3241 	spdk_memory_domain_set_invalidate(g_accel_domain, accel_memory_domain_invalidate);
3242 
3243 	g_modules_started = true;
3244 	rc = accel_module_initialize();
3245 	if (rc) {
3246 		return rc;
3247 	}
3248 
3249 	if (g_accel_driver != NULL && g_accel_driver->init != NULL) {
3250 		rc = g_accel_driver->init();
3251 		if (rc != 0) {
3252 			SPDK_ERRLOG("Failed to initialize driver %s: %s\n", g_accel_driver->name,
3253 				    spdk_strerror(-rc));
3254 			return rc;
3255 		}
3256 	}
3257 
3258 	/* The module list is order by priority, with the highest priority modules being at the end
3259 	 * of the list.  The software module should be somewhere at the beginning of the list,
3260 	 * before all HW modules.
3261 	 * NOTE: all opcodes must be supported by software in the event that no HW modules are
3262 	 * initialized to support the operation.
3263 	 */
3264 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
3265 		for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
3266 			if (accel_module->supports_opcode(op)) {
3267 				g_modules_opc[op].module = accel_module;
3268 				SPDK_DEBUGLOG(accel, "OPC 0x%x now assigned to %s\n", op, accel_module->name);
3269 			}
3270 		}
3271 
3272 		if (accel_module->get_ctx_size != NULL) {
3273 			g_max_accel_module_size = spdk_max(g_max_accel_module_size,
3274 							   accel_module->get_ctx_size());
3275 		}
3276 	}
3277 
3278 	/* Now lets check for overrides and apply all that exist */
3279 	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
3280 		if (g_modules_opc_override[op] != NULL) {
3281 			accel_module = _module_find_by_name(g_modules_opc_override[op]);
3282 			if (accel_module == NULL) {
3283 				SPDK_ERRLOG("Invalid module name of %s\n", g_modules_opc_override[op]);
3284 				return -EINVAL;
3285 			}
3286 			if (accel_module->supports_opcode(op) == false) {
3287 				SPDK_ERRLOG("Module %s does not support op code %d\n", accel_module->name, op);
3288 				return -EINVAL;
3289 			}
3290 			g_modules_opc[op].module = accel_module;
3291 		}
3292 	}
3293 
3294 	if (g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module != g_modules_opc[SPDK_ACCEL_OPC_DECRYPT].module) {
3295 		SPDK_ERRLOG("Different accel modules are assigned to encrypt and decrypt operations");
3296 		return -EINVAL;
3297 	}
3298 	if (g_modules_opc[SPDK_ACCEL_OPC_COMPRESS].module !=
3299 	    g_modules_opc[SPDK_ACCEL_OPC_DECOMPRESS].module) {
3300 		SPDK_ERRLOG("Different accel modules are assigned to compress and decompress operations");
3301 		return -EINVAL;
3302 	}
3303 
3304 	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
3305 		assert(g_modules_opc[op].module != NULL);
3306 		accel_module_init_opcode(op);
3307 	}
3308 
3309 	rc = spdk_iobuf_register_module("accel");
3310 	if (rc != 0) {
3311 		SPDK_ERRLOG("Failed to register accel iobuf module\n");
3312 		return rc;
3313 	}
3314 
3315 	return 0;
3316 }
3317 
3318 static void
3319 accel_module_finish_cb(void)
3320 {
3321 	spdk_accel_fini_cb cb_fn = g_fini_cb_fn;
3322 
3323 	cb_fn(g_fini_cb_arg);
3324 	g_fini_cb_fn = NULL;
3325 	g_fini_cb_arg = NULL;
3326 }
3327 
3328 static void
3329 accel_write_overridden_opc(struct spdk_json_write_ctx *w, const char *opc_str,
3330 			   const char *module_str)
3331 {
3332 	spdk_json_write_object_begin(w);
3333 	spdk_json_write_named_string(w, "method", "accel_assign_opc");
3334 	spdk_json_write_named_object_begin(w, "params");
3335 	spdk_json_write_named_string(w, "opname", opc_str);
3336 	spdk_json_write_named_string(w, "module", module_str);
3337 	spdk_json_write_object_end(w);
3338 	spdk_json_write_object_end(w);
3339 }
3340 
3341 static void
3342 __accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
3343 {
3344 	spdk_json_write_named_string(w, "name", key->param.key_name);
3345 	spdk_json_write_named_string(w, "cipher", key->param.cipher);
3346 	spdk_json_write_named_string(w, "key", key->param.hex_key);
3347 	if (key->param.hex_key2) {
3348 		spdk_json_write_named_string(w, "key2", key->param.hex_key2);
3349 	}
3350 
3351 	if (key->param.tweak_mode) {
3352 		spdk_json_write_named_string(w, "tweak_mode", key->param.tweak_mode);
3353 	}
3354 }
3355 
3356 void
3357 _accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
3358 {
3359 	spdk_json_write_object_begin(w);
3360 	__accel_crypto_key_dump_param(w, key);
3361 	spdk_json_write_object_end(w);
3362 }
3363 
3364 static void
3365 _accel_crypto_key_write_config_json(struct spdk_json_write_ctx *w,
3366 				    struct spdk_accel_crypto_key *key)
3367 {
3368 	spdk_json_write_object_begin(w);
3369 	spdk_json_write_named_string(w, "method", "accel_crypto_key_create");
3370 	spdk_json_write_named_object_begin(w, "params");
3371 	__accel_crypto_key_dump_param(w, key);
3372 	spdk_json_write_object_end(w);
3373 	spdk_json_write_object_end(w);
3374 }
3375 
3376 static void
3377 accel_write_options(struct spdk_json_write_ctx *w)
3378 {
3379 	spdk_json_write_object_begin(w);
3380 	spdk_json_write_named_string(w, "method", "accel_set_options");
3381 	spdk_json_write_named_object_begin(w, "params");
3382 	spdk_json_write_named_uint32(w, "small_cache_size", g_opts.small_cache_size);
3383 	spdk_json_write_named_uint32(w, "large_cache_size", g_opts.large_cache_size);
3384 	spdk_json_write_named_uint32(w, "task_count", g_opts.task_count);
3385 	spdk_json_write_named_uint32(w, "sequence_count", g_opts.sequence_count);
3386 	spdk_json_write_named_uint32(w, "buf_count", g_opts.buf_count);
3387 	spdk_json_write_object_end(w);
3388 	spdk_json_write_object_end(w);
3389 }
3390 
3391 static void
3392 _accel_crypto_keys_write_config_json(struct spdk_json_write_ctx *w, bool full_dump)
3393 {
3394 	struct spdk_accel_crypto_key *key;
3395 
3396 	spdk_spin_lock(&g_keyring_spin);
3397 	TAILQ_FOREACH(key, &g_keyring, link) {
3398 		if (full_dump) {
3399 			_accel_crypto_key_write_config_json(w, key);
3400 		} else {
3401 			_accel_crypto_key_dump_param(w, key);
3402 		}
3403 	}
3404 	spdk_spin_unlock(&g_keyring_spin);
3405 }
3406 
3407 void
3408 _accel_crypto_keys_dump_param(struct spdk_json_write_ctx *w)
3409 {
3410 	_accel_crypto_keys_write_config_json(w, false);
3411 }
3412 
3413 void
3414 spdk_accel_write_config_json(struct spdk_json_write_ctx *w)
3415 {
3416 	struct spdk_accel_module_if *accel_module;
3417 	int i;
3418 
3419 	spdk_json_write_array_begin(w);
3420 	accel_write_options(w);
3421 
3422 	if (g_accel_driver != NULL) {
3423 		spdk_json_write_object_begin(w);
3424 		spdk_json_write_named_string(w, "method", "accel_set_driver");
3425 		spdk_json_write_named_object_begin(w, "params");
3426 		spdk_json_write_named_string(w, "name", g_accel_driver->name);
3427 		spdk_json_write_object_end(w);
3428 		spdk_json_write_object_end(w);
3429 	}
3430 
3431 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
3432 		if (accel_module->write_config_json) {
3433 			accel_module->write_config_json(w);
3434 		}
3435 	}
3436 	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
3437 		if (g_modules_opc_override[i]) {
3438 			accel_write_overridden_opc(w, g_opcode_strings[i], g_modules_opc_override[i]);
3439 		}
3440 	}
3441 
3442 	_accel_crypto_keys_write_config_json(w, true);
3443 
3444 	spdk_json_write_array_end(w);
3445 }
3446 
3447 void
3448 spdk_accel_module_finish(void)
3449 {
3450 	if (!g_accel_module) {
3451 		g_accel_module = TAILQ_FIRST(&spdk_accel_module_list);
3452 	} else {
3453 		g_accel_module = TAILQ_NEXT(g_accel_module, tailq);
3454 	}
3455 
3456 	if (!g_accel_module) {
3457 		if (g_accel_driver != NULL && g_accel_driver->fini != NULL) {
3458 			g_accel_driver->fini();
3459 		}
3460 
3461 		spdk_spin_destroy(&g_keyring_spin);
3462 		spdk_spin_destroy(&g_stats_lock);
3463 		if (g_accel_domain) {
3464 			spdk_memory_domain_destroy(g_accel_domain);
3465 			g_accel_domain = NULL;
3466 		}
3467 		accel_module_finish_cb();
3468 		return;
3469 	}
3470 
3471 	if (g_accel_module->module_fini) {
3472 		spdk_thread_send_msg(spdk_get_thread(), g_accel_module->module_fini, NULL);
3473 	} else {
3474 		spdk_accel_module_finish();
3475 	}
3476 }
3477 
3478 static void
3479 accel_io_device_unregister_cb(void *io_device)
3480 {
3481 	struct spdk_accel_crypto_key *key, *key_tmp;
3482 	enum spdk_accel_opcode op;
3483 
3484 	spdk_spin_lock(&g_keyring_spin);
3485 	TAILQ_FOREACH_SAFE(key, &g_keyring, link, key_tmp) {
3486 		accel_crypto_key_destroy_unsafe(key);
3487 	}
3488 	spdk_spin_unlock(&g_keyring_spin);
3489 
3490 	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
3491 		if (g_modules_opc_override[op] != NULL) {
3492 			free(g_modules_opc_override[op]);
3493 			g_modules_opc_override[op] = NULL;
3494 		}
3495 		g_modules_opc[op].module = NULL;
3496 	}
3497 
3498 	spdk_accel_module_finish();
3499 }
3500 
3501 void
3502 spdk_accel_finish(spdk_accel_fini_cb cb_fn, void *cb_arg)
3503 {
3504 	assert(cb_fn != NULL);
3505 
3506 	g_fini_cb_fn = cb_fn;
3507 	g_fini_cb_arg = cb_arg;
3508 
3509 	spdk_io_device_unregister(&spdk_accel_module_list, accel_io_device_unregister_cb);
3510 }
3511 
3512 static struct spdk_accel_driver *
3513 accel_find_driver(const char *name)
3514 {
3515 	struct spdk_accel_driver *driver;
3516 
3517 	TAILQ_FOREACH(driver, &g_accel_drivers, tailq) {
3518 		if (strcmp(driver->name, name) == 0) {
3519 			return driver;
3520 		}
3521 	}
3522 
3523 	return NULL;
3524 }
3525 
3526 int
3527 spdk_accel_set_driver(const char *name)
3528 {
3529 	struct spdk_accel_driver *driver = NULL;
3530 
3531 	if (name != NULL && name[0] != '\0') {
3532 		driver = accel_find_driver(name);
3533 		if (driver == NULL) {
3534 			SPDK_ERRLOG("Couldn't find driver named '%s'\n", name);
3535 			return -ENODEV;
3536 		}
3537 	}
3538 
3539 	g_accel_driver = driver;
3540 
3541 	return 0;
3542 }
3543 
3544 const char *
3545 spdk_accel_get_driver_name(void)
3546 {
3547 	if (!g_accel_driver) {
3548 		return NULL;
3549 	}
3550 
3551 	return g_accel_driver->name;
3552 }
3553 
3554 void
3555 spdk_accel_driver_register(struct spdk_accel_driver *driver)
3556 {
3557 	if (accel_find_driver(driver->name)) {
3558 		SPDK_ERRLOG("Driver named '%s' has already been registered\n", driver->name);
3559 		assert(0);
3560 		return;
3561 	}
3562 
3563 	TAILQ_INSERT_TAIL(&g_accel_drivers, driver, tailq);
3564 }
3565 
3566 int
3567 spdk_accel_set_opts(const struct spdk_accel_opts *opts)
3568 {
3569 	if (!opts) {
3570 		SPDK_ERRLOG("opts cannot be NULL\n");
3571 		return -1;
3572 	}
3573 
3574 	if (!opts->opts_size) {
3575 		SPDK_ERRLOG("opts_size inside opts cannot be zero value\n");
3576 		return -1;
3577 	}
3578 
3579 	if (SPDK_GET_FIELD(opts, task_count, g_opts.task_count,
3580 			   opts->opts_size) < ACCEL_TASKS_IN_SEQUENCE_LIMIT) {
3581 		return -EINVAL;
3582 	}
3583 
3584 #define SET_FIELD(field) \
3585         if (offsetof(struct spdk_accel_opts, field) + sizeof(opts->field) <= opts->opts_size) { \
3586                 g_opts.field = opts->field; \
3587         } \
3588 
3589 	SET_FIELD(small_cache_size);
3590 	SET_FIELD(large_cache_size);
3591 	SET_FIELD(task_count);
3592 	SET_FIELD(sequence_count);
3593 	SET_FIELD(buf_count);
3594 
3595 	g_opts.opts_size = opts->opts_size;
3596 
3597 #undef SET_FIELD
3598 
3599 	return 0;
3600 }
3601 
3602 void
3603 spdk_accel_get_opts(struct spdk_accel_opts *opts, size_t opts_size)
3604 {
3605 	if (!opts) {
3606 		SPDK_ERRLOG("opts should not be NULL\n");
3607 		return;
3608 	}
3609 
3610 	if (!opts_size) {
3611 		SPDK_ERRLOG("opts_size should not be zero value\n");
3612 		return;
3613 	}
3614 
3615 	opts->opts_size = opts_size;
3616 
3617 #define SET_FIELD(field) \
3618 	if (offsetof(struct spdk_accel_opts, field) + sizeof(opts->field) <= opts_size) { \
3619 		opts->field = g_opts.field; \
3620 	} \
3621 
3622 	SET_FIELD(small_cache_size);
3623 	SET_FIELD(large_cache_size);
3624 	SET_FIELD(task_count);
3625 	SET_FIELD(sequence_count);
3626 	SET_FIELD(buf_count);
3627 
3628 #undef SET_FIELD
3629 
3630 	/* Do not remove this statement, you should always update this statement when you adding a new field,
3631 	 * and do not forget to add the SET_FIELD statement for your added field. */
3632 	SPDK_STATIC_ASSERT(sizeof(struct spdk_accel_opts) == 28, "Incorrect size");
3633 }
3634 
3635 struct accel_get_stats_ctx {
3636 	struct accel_stats	stats;
3637 	accel_get_stats_cb	cb_fn;
3638 	void			*cb_arg;
3639 };
3640 
3641 static void
3642 accel_get_channel_stats_done(struct spdk_io_channel_iter *iter, int status)
3643 {
3644 	struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);
3645 
3646 	ctx->cb_fn(&ctx->stats, ctx->cb_arg);
3647 	free(ctx);
3648 }
3649 
3650 static void
3651 accel_get_channel_stats(struct spdk_io_channel_iter *iter)
3652 {
3653 	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(iter);
3654 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
3655 	struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);
3656 
3657 	accel_add_stats(&ctx->stats, &accel_ch->stats);
3658 	spdk_for_each_channel_continue(iter, 0);
3659 }
3660 
3661 int
3662 accel_get_stats(accel_get_stats_cb cb_fn, void *cb_arg)
3663 {
3664 	struct accel_get_stats_ctx *ctx;
3665 
3666 	ctx = calloc(1, sizeof(*ctx));
3667 	if (ctx == NULL) {
3668 		return -ENOMEM;
3669 	}
3670 
3671 	spdk_spin_lock(&g_stats_lock);
3672 	accel_add_stats(&ctx->stats, &g_stats);
3673 	spdk_spin_unlock(&g_stats_lock);
3674 
3675 	ctx->cb_fn = cb_fn;
3676 	ctx->cb_arg = cb_arg;
3677 
3678 	spdk_for_each_channel(&spdk_accel_module_list, accel_get_channel_stats, ctx,
3679 			      accel_get_channel_stats_done);
3680 
3681 	return 0;
3682 }
3683 
3684 void
3685 spdk_accel_get_opcode_stats(struct spdk_io_channel *ch, enum spdk_accel_opcode opcode,
3686 			    struct spdk_accel_opcode_stats *stats, size_t size)
3687 {
3688 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
3689 
3690 #define FIELD_OK(field) \
3691 	offsetof(struct spdk_accel_opcode_stats, field) + sizeof(stats->field) <= size
3692 
3693 #define SET_FIELD(field, value) \
3694 	if (FIELD_OK(field)) { \
3695 		stats->field = value; \
3696 	}
3697 
3698 	SET_FIELD(executed, accel_ch->stats.operations[opcode].executed);
3699 	SET_FIELD(failed, accel_ch->stats.operations[opcode].failed);
3700 	SET_FIELD(num_bytes, accel_ch->stats.operations[opcode].num_bytes);
3701 
3702 #undef FIELD_OK
3703 #undef SET_FIELD
3704 }
3705 
3706 uint8_t
3707 spdk_accel_get_buf_align(enum spdk_accel_opcode opcode,
3708 			 const struct spdk_accel_operation_exec_ctx *ctx)
3709 {
3710 	struct spdk_accel_module_if *module = g_modules_opc[opcode].module;
3711 	struct spdk_accel_opcode_info modinfo = {}, drvinfo = {};
3712 
3713 	if (g_accel_driver != NULL && g_accel_driver->get_operation_info != NULL) {
3714 		g_accel_driver->get_operation_info(opcode, ctx, &drvinfo);
3715 	}
3716 
3717 	if (module->get_operation_info != NULL) {
3718 		module->get_operation_info(opcode, ctx, &modinfo);
3719 	}
3720 
3721 	/* If a driver is set, it'll execute most of the operations, while the rest will usually
3722 	 * fall back to accel_sw, which doesn't have any alignment requirements.  However, to be
3723 	 * extra safe, return the max(driver, module) if a driver delegates some operations to a
3724 	 * hardware module. */
3725 	return spdk_max(modinfo.required_alignment, drvinfo.required_alignment);
3726 }
3727 
3728 struct spdk_accel_module_if *
3729 spdk_accel_get_module(const char *name)
3730 {
3731 	struct spdk_accel_module_if *module;
3732 
3733 	TAILQ_FOREACH(module, &spdk_accel_module_list, tailq) {
3734 		if (strcmp(module->name, name) == 0) {
3735 			return module;
3736 		}
3737 	}
3738 
3739 	return NULL;
3740 }
3741 
3742 int
3743 spdk_accel_get_opc_memory_domains(enum spdk_accel_opcode opcode,
3744 				  struct spdk_memory_domain **domains,
3745 				  int array_size)
3746 {
3747 	assert(opcode < SPDK_ACCEL_OPC_LAST);
3748 
3749 	if (g_modules_opc[opcode].module->get_memory_domains) {
3750 		return g_modules_opc[opcode].module->get_memory_domains(domains, array_size);
3751 	}
3752 
3753 	return 0;
3754 }
3755 
3756 SPDK_LOG_REGISTER_COMPONENT(accel)
3757