xref: /spdk/lib/accel/accel.c (revision 9b562bdf50151b749f4ccd777783d41e2cd06a4b)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2020 Intel Corporation.
3  *   Copyright (c) 2022, 2023 NVIDIA CORPORATION & AFFILIATES.
4  *   All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "spdk/accel_module.h"
10 
11 #include "accel_internal.h"
12 
13 #include "spdk/dma.h"
14 #include "spdk/env.h"
15 #include "spdk/likely.h"
16 #include "spdk/log.h"
17 #include "spdk/thread.h"
18 #include "spdk/json.h"
19 #include "spdk/crc32.h"
20 #include "spdk/util.h"
21 #include "spdk/hexlify.h"
22 #include "spdk/string.h"
23 
24 /* Accelerator Framework: The following provides a top level
25  * generic API for the accelerator functions defined here. Modules,
26  * such as the one in /module/accel/ioat, supply the implementation
27  * with the exception of the pure software implementation contained
28  * later in this file.
29  */
30 
31 #define ALIGN_4K			0x1000
32 #define MAX_TASKS_PER_CHANNEL		0x800
33 #define ACCEL_SMALL_CACHE_SIZE		128
34 #define ACCEL_LARGE_CACHE_SIZE		16
35 /* Set MSB, so we don't return NULL pointers as buffers */
36 #define ACCEL_BUFFER_BASE		((void *)(1ull << 63))
37 #define ACCEL_BUFFER_OFFSET_MASK	((uintptr_t)ACCEL_BUFFER_BASE - 1)
38 
39 #define ACCEL_CRYPTO_TWEAK_MODE_DEFAULT	SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA
40 
/* Binding of an accel module to an opcode, with a cached flag telling whether
 * the module implements memory-domain support (presumably cached to avoid
 * re-querying the module on the I/O path — confirm against init code). */
struct accel_module {
	struct spdk_accel_module_if	*module;
	bool				supports_memory_domains;
};
45 
46 /* Largest context size for all accel modules */
47 static size_t g_max_accel_module_size = sizeof(struct spdk_accel_task);
48 
49 static struct spdk_accel_module_if *g_accel_module = NULL;
50 static spdk_accel_fini_cb g_fini_cb_fn = NULL;
51 static void *g_fini_cb_arg = NULL;
52 static bool g_modules_started = false;
53 static struct spdk_memory_domain *g_accel_domain;
54 
55 /* Global list of registered accelerator modules */
56 static TAILQ_HEAD(, spdk_accel_module_if) spdk_accel_module_list =
57 	TAILQ_HEAD_INITIALIZER(spdk_accel_module_list);
58 
59 /* Crypto keyring */
60 static TAILQ_HEAD(, spdk_accel_crypto_key) g_keyring = TAILQ_HEAD_INITIALIZER(g_keyring);
61 static struct spdk_spinlock g_keyring_spin;
62 
63 /* Global array mapping capabilities to modules */
64 static struct accel_module g_modules_opc[SPDK_ACCEL_OPC_LAST] = {};
65 static char *g_modules_opc_override[SPDK_ACCEL_OPC_LAST] = {};
66 TAILQ_HEAD(, spdk_accel_driver) g_accel_drivers = TAILQ_HEAD_INITIALIZER(g_accel_drivers);
67 static struct spdk_accel_driver *g_accel_driver;
68 static struct spdk_accel_opts g_opts = {
69 	.small_cache_size = ACCEL_SMALL_CACHE_SIZE,
70 	.large_cache_size = ACCEL_LARGE_CACHE_SIZE,
71 	.task_count = MAX_TASKS_PER_CHANNEL,
72 	.sequence_count = MAX_TASKS_PER_CHANNEL,
73 	.buf_count = MAX_TASKS_PER_CHANNEL,
74 };
75 static struct accel_stats g_stats;
76 static struct spdk_spinlock g_stats_lock;
77 
/* Human-readable name for each opcode, indexed by enum spdk_accel_opcode.
 * Must stay in sync with the enum ordering. */
static const char *g_opcode_strings[SPDK_ACCEL_OPC_LAST] = {
	"copy", "fill", "dualcast", "compare", "crc32c", "copy_crc32c",
	"compress", "decompress", "encrypt", "decrypt", "xor",
	"dif_verify", "dif_generate", "dif_generate_copy"
};
83 
/* States of the per-sequence processing state machine.  The AWAIT_* states
 * are the asynchronous counterparts of the state they follow: the sequence
 * parks there until the corresponding completion callback advances it. */
enum accel_sequence_state {
	ACCEL_SEQUENCE_STATE_INIT,
	ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF,
	ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF,
	ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF,
	ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF,
	ACCEL_SEQUENCE_STATE_PULL_DATA,
	ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA,
	ACCEL_SEQUENCE_STATE_EXEC_TASK,
	ACCEL_SEQUENCE_STATE_AWAIT_TASK,
	ACCEL_SEQUENCE_STATE_COMPLETE_TASK,
	ACCEL_SEQUENCE_STATE_NEXT_TASK,
	ACCEL_SEQUENCE_STATE_PUSH_DATA,
	ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA,
	/* States used when a platform driver executes (part of) the sequence */
	ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS,
	ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS,
	ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS,
	/* Terminal error state; see accel_sequence_set_state() assert */
	ACCEL_SEQUENCE_STATE_ERROR,
	ACCEL_SEQUENCE_STATE_MAX,
};
104 
/* Debug-log names for the states above; only referenced from SPDK_DEBUGLOG,
 * hence marked unused for builds that compile the logging out. */
static const char *g_seq_states[]
__attribute__((unused)) = {
	[ACCEL_SEQUENCE_STATE_INIT] = "init",
	[ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF] = "check-virtbuf",
	[ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF] = "await-virtbuf",
	[ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF] = "check-bouncebuf",
	[ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF] = "await-bouncebuf",
	[ACCEL_SEQUENCE_STATE_PULL_DATA] = "pull-data",
	[ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA] = "await-pull-data",
	[ACCEL_SEQUENCE_STATE_EXEC_TASK] = "exec-task",
	[ACCEL_SEQUENCE_STATE_AWAIT_TASK] = "await-task",
	[ACCEL_SEQUENCE_STATE_COMPLETE_TASK] = "complete-task",
	[ACCEL_SEQUENCE_STATE_NEXT_TASK] = "next-task",
	[ACCEL_SEQUENCE_STATE_PUSH_DATA] = "push-data",
	[ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA] = "await-push-data",
	[ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS] = "driver-exec-tasks",
	[ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS] = "driver-await-tasks",
	[ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS] = "driver-complete-tasks",
	[ACCEL_SEQUENCE_STATE_ERROR] = "error",
	[ACCEL_SEQUENCE_STATE_MAX] = "",
};

/* Range-checked lookup into g_seq_states, safe for any integer input. */
#define ACCEL_SEQUENCE_STATE_STRING(s) \
	(((s) >= ACCEL_SEQUENCE_STATE_INIT && (s) < ACCEL_SEQUENCE_STATE_MAX) \
	 ? g_seq_states[s] : "unknown")
130 
/* Descriptor for an accel-allocated buffer.  The backing memory (buf) is
 * obtained from the channel's iobuf pool on demand — accel_get_buf() hands
 * out the descriptor with buf == NULL. */
struct accel_buffer {
	/* Sequence this buffer is attached to, if any */
	struct spdk_accel_sequence	*seq;
	/* Backing memory; NULL until allocated from iobuf */
	void				*buf;
	uint64_t			len;
	/* iobuf wait-queue entry, presumably used while waiting for memory
	 * to become available — confirm against the iobuf_get path */
	struct spdk_iobuf_entry		iobuf;
	/* User callback invoked when a deferred buffer allocation completes */
	spdk_accel_sequence_get_buf_cb	cb_fn;
	void				*cb_ctx;
	SLIST_ENTRY(accel_buffer)	link;
	/* Owning channel; used to return the descriptor and memory */
	struct accel_io_channel		*ch;
};
141 
/* Per-thread accel channel: one module channel per opcode plus pre-allocated
 * pools of tasks, sequences, aux data and buffer descriptors. */
struct accel_io_channel {
	/* One module I/O channel per opcode, mirroring g_modules_opc */
	struct spdk_io_channel			*module_ch[SPDK_ACCEL_OPC_LAST];
	struct spdk_io_channel			*driver_channel;
	/* Backing allocations for the free lists below */
	void					*task_pool_base;
	struct spdk_accel_sequence		*seq_pool_base;
	struct accel_buffer			*buf_pool_base;
	struct spdk_accel_task_aux_data		*task_aux_data_base;
	/* Free lists; see _get_task(), accel_sequence_get(), accel_get_buf() */
	STAILQ_HEAD(, spdk_accel_task)		task_pool;
	SLIST_HEAD(, spdk_accel_task_aux_data)	task_aux_data_pool;
	SLIST_HEAD(, spdk_accel_sequence)	seq_pool;
	SLIST_HEAD(, accel_buffer)		buf_pool;
	struct spdk_iobuf_channel		iobuf;
	/* Per-channel statistics, updated via accel_update_stats() */
	struct accel_stats			stats;
};
156 
/* Type of the ordered list of tasks that make up a sequence. */
TAILQ_HEAD(accel_sequence_tasks, spdk_accel_task);

struct spdk_accel_sequence {
	/* Channel the sequence was allocated from */
	struct accel_io_channel			*ch;
	/* Tasks to execute, in submission order */
	struct accel_sequence_tasks		tasks;
	/* Bounce buffers owned by this sequence; returned on put */
	SLIST_HEAD(, accel_buffer)		bounce_bufs;
	/* Completion status reported to cb_fn */
	int					status;
	/* state uses enum accel_sequence_state */
	uint8_t					state;
	bool					in_process_sequence;
	spdk_accel_completion_cb		cb_fn;
	void					*cb_arg;
	SLIST_ENTRY(spdk_accel_sequence)	link;
};
/* Kept at exactly 64 bytes (presumably one cache line); adding a field
 * requires revisiting this assert. */
SPDK_STATIC_ASSERT(sizeof(struct spdk_accel_sequence) == 64, "invalid size");
172 
/* Add (v) to a counter in the channel's stats; `event` is a member
 * designator within struct accel_stats. */
#define accel_update_stats(ch, event, v) \
	do { \
		(ch)->stats.event += (v); \
	} while (0)

/* Same, but for the per-opcode counters, indexed by the task's opcode. */
#define accel_update_task_stats(ch, task, event, v) \
	accel_update_stats(ch, operations[(task)->op_code].event, v)
180 
181 static inline void accel_sequence_task_cb(void *cb_arg, int status);
182 
/* Transition a sequence to a new state, logging the change in debug builds. */
static inline void
accel_sequence_set_state(struct spdk_accel_sequence *seq, enum accel_sequence_state state)
{
	SPDK_DEBUGLOG(accel, "seq=%p, setting state: %s -> %s\n", seq,
		      ACCEL_SEQUENCE_STATE_STRING(seq->state), ACCEL_SEQUENCE_STATE_STRING(state));
	/* ERROR is terminal: a failed sequence may only remain in ERROR */
	assert(seq->state != ACCEL_SEQUENCE_STATE_ERROR || state == ACCEL_SEQUENCE_STATE_ERROR);
	seq->state = state;
}
191 
/* Fail a sequence: move it to the terminal ERROR state and record the
 * (necessarily non-zero) status for the completion callback. */
static void
accel_sequence_set_fail(struct spdk_accel_sequence *seq, int status)
{
	accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
	assert(status != 0);
	seq->status = status;
}
199 
200 int
201 spdk_accel_get_opc_module_name(enum spdk_accel_opcode opcode, const char **module_name)
202 {
203 	if (opcode >= SPDK_ACCEL_OPC_LAST) {
204 		/* invalid opcode */
205 		return -EINVAL;
206 	}
207 
208 	if (g_modules_opc[opcode].module) {
209 		*module_name = g_modules_opc[opcode].module->name;
210 	} else {
211 		return -ENOENT;
212 	}
213 
214 	return 0;
215 }
216 
217 void
218 _accel_for_each_module(struct module_info *info, _accel_for_each_module_fn fn)
219 {
220 	struct spdk_accel_module_if *accel_module;
221 	enum spdk_accel_opcode opcode;
222 	int j = 0;
223 
224 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
225 		for (opcode = 0; opcode < SPDK_ACCEL_OPC_LAST; opcode++) {
226 			if (accel_module->supports_opcode(opcode)) {
227 				info->ops[j] = opcode;
228 				j++;
229 			}
230 		}
231 		info->name = accel_module->name;
232 		info->num_ops = j;
233 		fn(info);
234 		j = 0;
235 	}
236 }
237 
238 const char *
239 spdk_accel_get_opcode_name(enum spdk_accel_opcode opcode)
240 {
241 	if (opcode < SPDK_ACCEL_OPC_LAST) {
242 		return g_opcode_strings[opcode];
243 	}
244 
245 	return NULL;
246 }
247 
248 int
249 spdk_accel_assign_opc(enum spdk_accel_opcode opcode, const char *name)
250 {
251 	char *copy;
252 
253 	if (g_modules_started == true) {
254 		/* we don't allow re-assignment once things have started */
255 		return -EINVAL;
256 	}
257 
258 	if (opcode >= SPDK_ACCEL_OPC_LAST) {
259 		/* invalid opcode */
260 		return -EINVAL;
261 	}
262 
263 	copy = strdup(name);
264 	if (copy == NULL) {
265 		return -ENOMEM;
266 	}
267 
268 	/* module selection will be validated after the framework starts. */
269 	free(g_modules_opc_override[opcode]);
270 	g_modules_opc_override[opcode] = copy;
271 
272 	return 0;
273 }
274 
/* Complete a task: update statistics, release per-task resources and invoke
 * the user callback (or hand the task back to its owning sequence). */
void
spdk_accel_task_complete(struct spdk_accel_task *accel_task, int status)
{
	struct accel_io_channel		*accel_ch = accel_task->accel_ch;
	spdk_accel_completion_cb	cb_fn;
	void				*cb_arg;

	/* Every completion counts as executed; failures are counted on top. */
	accel_update_task_stats(accel_ch, accel_task, executed, 1);
	accel_update_task_stats(accel_ch, accel_task, num_bytes, accel_task->nbytes);
	if (spdk_unlikely(status != 0)) {
		accel_update_task_stats(accel_ch, accel_task, failed, 1);
	}

	/* Tasks that belong to a sequence are completed by the sequence state
	 * machine, which also owns returning them to the pool. */
	if (accel_task->seq) {
		accel_sequence_task_cb(accel_task->seq, status);
		return;
	}

	cb_fn = accel_task->cb_fn;
	cb_arg = accel_task->cb_arg;

	/* Return the aux data entry before the task itself */
	if (accel_task->has_aux) {
		SLIST_INSERT_HEAD(&accel_ch->task_aux_data_pool, accel_task->aux, link);
		accel_task->aux = NULL;
		accel_task->has_aux = false;
	}

	/* We should put the accel_task into the list firstly in order to avoid
	 * the accel task list is exhausted when there is recursive call to
	 * allocate accel_task in user's call back function (cb_fn)
	 */
	STAILQ_INSERT_HEAD(&accel_ch->task_pool, accel_task, link);

	cb_fn(cb_arg, status);
}
310 
311 inline static struct spdk_accel_task *
312 _get_task(struct accel_io_channel *accel_ch, spdk_accel_completion_cb cb_fn, void *cb_arg)
313 {
314 	struct spdk_accel_task *accel_task;
315 
316 	accel_task = STAILQ_FIRST(&accel_ch->task_pool);
317 	if (spdk_unlikely(accel_task == NULL)) {
318 		accel_update_stats(accel_ch, retry.task, 1);
319 		return NULL;
320 	}
321 
322 	STAILQ_REMOVE_HEAD(&accel_ch->task_pool, link);
323 	accel_task->link.stqe_next = NULL;
324 
325 	accel_task->cb_fn = cb_fn;
326 	accel_task->cb_arg = cb_arg;
327 	accel_task->accel_ch = accel_ch;
328 	accel_task->s.iovs = NULL;
329 	accel_task->d.iovs = NULL;
330 
331 	return accel_task;
332 }
333 
334 static inline int
335 accel_submit_task(struct accel_io_channel *accel_ch, struct spdk_accel_task *task)
336 {
337 	struct spdk_io_channel *module_ch = accel_ch->module_ch[task->op_code];
338 	struct spdk_accel_module_if *module = g_modules_opc[task->op_code].module;
339 	int rc;
340 
341 	rc = module->submit_tasks(module_ch, task);
342 	if (spdk_unlikely(rc != 0)) {
343 		accel_update_task_stats(accel_ch, task, failed, 1);
344 	}
345 
346 	return rc;
347 }
348 
/* Total number of bytes described by an iovec array. */
static inline uint64_t
accel_get_iovlen(struct iovec *iovs, uint32_t iovcnt)
{
	struct iovec *iov;
	uint64_t total = 0;

	for (iov = iovs; iov < iovs + iovcnt; iov++) {
		total += iov->iov_len;
	}

	return total;
}
361 
/* Attach an aux data entry from the channel's pool to (task).
 * WARNING: this macro contains a hidden control-flow exit — on an empty pool
 * it puts the task back and executes `return -ENOMEM` in the *caller*, so it
 * may only be used inside int-returning submit functions.  The pools are
 * sized identically, so exhaustion here indicates a bug (hence the assert). */
#define ACCEL_TASK_ALLOC_AUX_BUF(task)						\
do {										\
        (task)->aux = SLIST_FIRST(&(task)->accel_ch->task_aux_data_pool);	\
        if (spdk_unlikely(!(task)->aux)) {					\
                SPDK_ERRLOG("Fatal problem, aux data was not allocated\n");	\
                STAILQ_INSERT_HEAD(&(task)->accel_ch->task_pool, (task), link);	\
                assert(0);							\
                return -ENOMEM;							\
        }									\
        SLIST_REMOVE_HEAD(&(task)->accel_ch->task_aux_data_pool, link);		\
        (task)->has_aux = true;							\
} while (0)
374 
SPDK_LOG_DEPRECATION_REGISTER(accel_flags,
			      "The flags parameter is unused and deprecated",
			      "v24.05", 0);

/* \b `flags` is int in API, since it is not used anywhere. we narrowed it down to uint8_t internally
 * To prevent possible problems in the future, add a macro which checks that the value of `flags` passed in the API
 * doesn't exceed 1 byte. */
#define ACCEL_ASSIGN_FLAGS(task, flags)							\
do {											\
	assert(((flags) & (~0xff)) == 0 && "task::flags needs to be extended");		\
	(task)->flags = (uint8_t)(flags);						\
	if ((task)->flags) {								\
		/* Any non-zero value triggers the deprecation warning */		\
		SPDK_LOG_DEPRECATED(accel_flags);					\
	}										\
} while (0)										\
390 
/* Accel framework public API for copy function */
int
spdk_accel_submit_copy(struct spdk_io_channel *ch, void *dst, void *src,
		       uint64_t nbytes, int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		/* Task pool exhausted; transient, caller may retry */
		return -ENOMEM;
	}

	/* NOTE: returns -ENOMEM from within the macro on failure */
	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

	/* Describe src/dst as single-element iovecs backed by the task's aux
	 * data, so modules see a uniform iovec-based interface. */
	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->op_code = SPDK_ACCEL_OPC_COPY;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	ACCEL_ASSIGN_FLAGS(accel_task, flags);

	return accel_submit_task(accel_ch, accel_task);
}
422 
/* Accel framework public API for dual cast copy function */
int
spdk_accel_submit_dualcast(struct spdk_io_channel *ch, void *dst1,
			   void *dst2, void *src, uint64_t nbytes, int flags,
			   spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	/* Both destinations must be 4K-aligned (ALIGN_4K) */
	if ((uintptr_t)dst1 & (ALIGN_4K - 1) || (uintptr_t)dst2 & (ALIGN_4K - 1)) {
		SPDK_ERRLOG("Dualcast requires 4K alignment on dst addresses\n");
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	/* NOTE: returns -ENOMEM from within the macro on failure */
	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

	/* One source, two identical-length destinations (d and d2) */
	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d2.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST2];
	accel_task->d.iovs[0].iov_base = dst1;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->d2.iovs[0].iov_base = dst2;
	accel_task->d2.iovs[0].iov_len = nbytes;
	accel_task->d2.iovcnt = 1;
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->op_code = SPDK_ACCEL_OPC_DUALCAST;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	ACCEL_ASSIGN_FLAGS(accel_task, flags);

	return accel_submit_task(accel_ch, accel_task);
}
464 
/* Accel framework public API for compare function */

int
spdk_accel_submit_compare(struct spdk_io_channel *ch, void *src1,
			  void *src2, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
			  void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	/* NOTE: returns -ENOMEM from within the macro on failure */
	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

	/* Two sources (s and s2), no destination */
	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->s2.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC2];
	accel_task->s.iovs[0].iov_base = src1;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->s2.iovs[0].iov_base = src2;
	accel_task->s2.iovs[0].iov_len = nbytes;
	accel_task->s2.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->op_code = SPDK_ACCEL_OPC_COMPARE;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	/* No flags in this API; clear any value left from a pooled task */
	accel_task->flags = 0;

	return accel_submit_task(accel_ch, accel_task);
}
498 
/* Accel framework public API for fill function */
int
spdk_accel_submit_fill(struct spdk_io_channel *ch, void *dst,
		       uint8_t fill, uint64_t nbytes, int flags,
		       spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	/* NOTE: returns -ENOMEM from within the macro on failure */
	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->nbytes = nbytes;
	/* Replicate the single fill byte across the 8-byte pattern field */
	memset(&accel_task->fill_pattern, fill, sizeof(uint64_t));
	accel_task->op_code = SPDK_ACCEL_OPC_FILL;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	ACCEL_ASSIGN_FLAGS(accel_task, flags);

	return accel_submit_task(accel_ch, accel_task);
}
528 
/* Accel framework public API for CRC-32C function */
int
spdk_accel_submit_crc32c(struct spdk_io_channel *ch, uint32_t *crc_dst,
			 void *src, uint32_t seed, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
			 void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	/* NOTE: returns -ENOMEM from within the macro on failure */
	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->nbytes = nbytes;
	/* Result is written to *crc_dst on completion */
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->op_code = SPDK_ACCEL_OPC_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	/* No flags in this API; clear any value left from a pooled task */
	accel_task->flags = 0;

	return accel_submit_task(accel_ch, accel_task);
}
559 
560 /* Accel framework public API for chained CRC-32C function */
561 int
562 spdk_accel_submit_crc32cv(struct spdk_io_channel *ch, uint32_t *crc_dst,
563 			  struct iovec *iov, uint32_t iov_cnt, uint32_t seed,
564 			  spdk_accel_completion_cb cb_fn, void *cb_arg)
565 {
566 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
567 	struct spdk_accel_task *accel_task;
568 
569 	if (iov == NULL) {
570 		SPDK_ERRLOG("iov should not be NULL");
571 		return -EINVAL;
572 	}
573 
574 	if (!iov_cnt) {
575 		SPDK_ERRLOG("iovcnt should not be zero value\n");
576 		return -EINVAL;
577 	}
578 
579 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
580 	if (spdk_unlikely(accel_task == NULL)) {
581 		SPDK_ERRLOG("no memory\n");
582 		assert(0);
583 		return -ENOMEM;
584 	}
585 
586 	accel_task->s.iovs = iov;
587 	accel_task->s.iovcnt = iov_cnt;
588 	accel_task->nbytes = accel_get_iovlen(iov, iov_cnt);
589 	accel_task->crc_dst = crc_dst;
590 	accel_task->seed = seed;
591 	accel_task->op_code = SPDK_ACCEL_OPC_CRC32C;
592 	accel_task->src_domain = NULL;
593 	accel_task->dst_domain = NULL;
594 	accel_task->flags = 0;
595 
596 	return accel_submit_task(accel_ch, accel_task);
597 }
598 
/* Accel framework public API for copy with CRC-32C function */
int
spdk_accel_submit_copy_crc32c(struct spdk_io_channel *ch, void *dst,
			      void *src, uint32_t *crc_dst, uint32_t seed, uint64_t nbytes,
			      int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	/* NOTE: returns -ENOMEM from within the macro on failure */
	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

	/* Copy src -> dst and compute CRC-32C of the data in one operation */
	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->op_code = SPDK_ACCEL_OPC_COPY_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	ACCEL_ASSIGN_FLAGS(accel_task, flags);

	return accel_submit_task(accel_ch, accel_task);
}
633 
634 /* Accel framework public API for chained copy + CRC-32C function */
635 int
636 spdk_accel_submit_copy_crc32cv(struct spdk_io_channel *ch, void *dst,
637 			       struct iovec *src_iovs, uint32_t iov_cnt, uint32_t *crc_dst,
638 			       uint32_t seed, int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
639 {
640 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
641 	struct spdk_accel_task *accel_task;
642 	uint64_t nbytes;
643 
644 	if (src_iovs == NULL) {
645 		SPDK_ERRLOG("iov should not be NULL");
646 		return -EINVAL;
647 	}
648 
649 	if (!iov_cnt) {
650 		SPDK_ERRLOG("iovcnt should not be zero value\n");
651 		return -EINVAL;
652 	}
653 
654 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
655 	if (spdk_unlikely(accel_task == NULL)) {
656 		SPDK_ERRLOG("no memory\n");
657 		assert(0);
658 		return -ENOMEM;
659 	}
660 
661 	nbytes = accel_get_iovlen(src_iovs, iov_cnt);
662 
663 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
664 
665 	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
666 	accel_task->d.iovs[0].iov_base = dst;
667 	accel_task->d.iovs[0].iov_len = nbytes;
668 	accel_task->d.iovcnt = 1;
669 	accel_task->s.iovs = src_iovs;
670 	accel_task->s.iovcnt = iov_cnt;
671 	accel_task->nbytes = nbytes;
672 	accel_task->crc_dst = crc_dst;
673 	accel_task->seed = seed;
674 	accel_task->op_code = SPDK_ACCEL_OPC_COPY_CRC32C;
675 	accel_task->src_domain = NULL;
676 	accel_task->dst_domain = NULL;
677 	ACCEL_ASSIGN_FLAGS(accel_task, flags);
678 
679 	return accel_submit_task(accel_ch, accel_task);
680 }
681 
/* Accel framework public API for compression.  `dst`/`nbytes` describe the
 * output buffer; the actual compressed size is reported via *output_size. */
int
spdk_accel_submit_compress(struct spdk_io_channel *ch, void *dst, uint64_t nbytes,
			   struct iovec *src_iovs, size_t src_iovcnt, uint32_t *output_size, int flags,
			   spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	/* NOTE: returns -ENOMEM from within the macro on failure */
	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->output_size = output_size;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	/* NOTE(review): nbytes here is the dst buffer size, while decompress
	 * uses the source length — confirm modules expect this asymmetry. */
	accel_task->nbytes = nbytes;
	accel_task->op_code = SPDK_ACCEL_OPC_COMPRESS;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	ACCEL_ASSIGN_FLAGS(accel_task, flags);

	return accel_submit_task(accel_ch, accel_task);
}
712 
/* Accel framework public API for decompression.  The decompressed size is
 * reported via *output_size on completion. */
int
spdk_accel_submit_decompress(struct spdk_io_channel *ch, struct iovec *dst_iovs,
			     size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
			     uint32_t *output_size, int flags, spdk_accel_completion_cb cb_fn,
			     void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	/* NOTE(review): unlike encrypt/decrypt, the iovecs are not validated
	 * here; accel_get_iovlen() would dereference a NULL src_iovs — confirm
	 * callers guarantee non-NULL. */
	accel_task->output_size = output_size;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	/* nbytes is the compressed (source) length */
	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	accel_task->op_code = SPDK_ACCEL_OPC_DECOMPRESS;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	ACCEL_ASSIGN_FLAGS(accel_task, flags);

	return accel_submit_task(accel_ch, accel_task);
}
740 
/* Accel framework public API for encryption.  `iv` is the initial tweak value
 * and `block_size` the crypto block granularity. */
int
spdk_accel_submit_encrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  uint64_t iv, uint32_t block_size, int flags,
			  spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	/* Reject obviously invalid arguments up front */
	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->crypto_key = key;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	accel_task->iv = iv;
	accel_task->block_size = block_size;
	accel_task->op_code = SPDK_ACCEL_OPC_ENCRYPT;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	ACCEL_ASSIGN_FLAGS(accel_task, flags);

	return accel_submit_task(accel_ch, accel_task);
}
775 
/* Accel framework public API for decryption; mirror image of
 * spdk_accel_submit_encrypt(). */
int
spdk_accel_submit_decrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  uint64_t iv, uint32_t block_size, int flags,
			  spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	/* Reject obviously invalid arguments up front */
	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->crypto_key = key;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	accel_task->iv = iv;
	accel_task->block_size = block_size;
	accel_task->op_code = SPDK_ACCEL_OPC_DECRYPT;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	ACCEL_ASSIGN_FLAGS(accel_task, flags);

	return accel_submit_task(accel_ch, accel_task);
}
810 
811 int
812 spdk_accel_submit_xor(struct spdk_io_channel *ch, void *dst, void **sources, uint32_t nsrcs,
813 		      uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
814 {
815 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
816 	struct spdk_accel_task *accel_task;
817 
818 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
819 	if (spdk_unlikely(accel_task == NULL)) {
820 		return -ENOMEM;
821 	}
822 
823 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
824 
825 	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
826 	accel_task->nsrcs.srcs = sources;
827 	accel_task->nsrcs.cnt = nsrcs;
828 	accel_task->d.iovs[0].iov_base = dst;
829 	accel_task->d.iovs[0].iov_len = nbytes;
830 	accel_task->d.iovcnt = 1;
831 	accel_task->nbytes = nbytes;
832 	accel_task->op_code = SPDK_ACCEL_OPC_XOR;
833 	accel_task->src_domain = NULL;
834 	accel_task->dst_domain = NULL;
835 
836 	return accel_submit_task(accel_ch, accel_task);
837 }
838 
839 int
840 spdk_accel_submit_dif_verify(struct spdk_io_channel *ch,
841 			     struct iovec *iovs, size_t iovcnt, uint32_t num_blocks,
842 			     const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err,
843 			     spdk_accel_completion_cb cb_fn, void *cb_arg)
844 {
845 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
846 	struct spdk_accel_task *accel_task;
847 
848 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
849 	if (accel_task == NULL) {
850 		return -ENOMEM;
851 	}
852 
853 	accel_task->s.iovs = iovs;
854 	accel_task->s.iovcnt = iovcnt;
855 	accel_task->dif.ctx = ctx;
856 	accel_task->dif.err = err;
857 	accel_task->dif.num_blocks = num_blocks;
858 	accel_task->nbytes = num_blocks * ctx->block_size;
859 	accel_task->op_code = SPDK_ACCEL_OPC_DIF_VERIFY;
860 	accel_task->src_domain = NULL;
861 	accel_task->dst_domain = NULL;
862 
863 	return accel_submit_task(accel_ch, accel_task);
864 }
865 
866 int
867 spdk_accel_submit_dif_generate(struct spdk_io_channel *ch,
868 			       struct iovec *iovs, size_t iovcnt, uint32_t num_blocks,
869 			       const struct spdk_dif_ctx *ctx,
870 			       spdk_accel_completion_cb cb_fn, void *cb_arg)
871 {
872 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
873 	struct spdk_accel_task *accel_task;
874 
875 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
876 	if (accel_task == NULL) {
877 		return -ENOMEM;
878 	}
879 
880 	accel_task->s.iovs = iovs;
881 	accel_task->s.iovcnt = iovcnt;
882 	accel_task->dif.ctx = ctx;
883 	accel_task->dif.num_blocks = num_blocks;
884 	accel_task->nbytes = num_blocks * ctx->block_size;
885 	accel_task->op_code = SPDK_ACCEL_OPC_DIF_GENERATE;
886 	accel_task->src_domain = NULL;
887 	accel_task->dst_domain = NULL;
888 
889 	return accel_submit_task(accel_ch, accel_task);
890 }
891 
892 int
893 spdk_accel_submit_dif_generate_copy(struct spdk_io_channel *ch, struct iovec *dst_iovs,
894 				    size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
895 				    uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
896 				    spdk_accel_completion_cb cb_fn, void *cb_arg)
897 {
898 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
899 	struct spdk_accel_task *accel_task;
900 
901 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
902 	if (accel_task == NULL) {
903 		return -ENOMEM;
904 	}
905 
906 	accel_task->s.iovs = src_iovs;
907 	accel_task->s.iovcnt = src_iovcnt;
908 	accel_task->d.iovs = dst_iovs;
909 	accel_task->d.iovcnt = dst_iovcnt;
910 	accel_task->dif.ctx = ctx;
911 	accel_task->dif.num_blocks = num_blocks;
912 	accel_task->nbytes = num_blocks * ctx->block_size;
913 	accel_task->op_code = SPDK_ACCEL_OPC_DIF_GENERATE_COPY;
914 	accel_task->src_domain = NULL;
915 	accel_task->dst_domain = NULL;
916 
917 	return accel_submit_task(accel_ch, accel_task);
918 }
919 
920 static inline struct accel_buffer *
921 accel_get_buf(struct accel_io_channel *ch, uint64_t len)
922 {
923 	struct accel_buffer *buf;
924 
925 	buf = SLIST_FIRST(&ch->buf_pool);
926 	if (spdk_unlikely(buf == NULL)) {
927 		accel_update_stats(ch, retry.bufdesc, 1);
928 		return NULL;
929 	}
930 
931 	SLIST_REMOVE_HEAD(&ch->buf_pool, link);
932 	buf->len = len;
933 	buf->buf = NULL;
934 	buf->seq = NULL;
935 	buf->cb_fn = NULL;
936 
937 	return buf;
938 }
939 
940 static inline void
941 accel_put_buf(struct accel_io_channel *ch, struct accel_buffer *buf)
942 {
943 	if (buf->buf != NULL) {
944 		spdk_iobuf_put(&ch->iobuf, buf->buf, buf->len);
945 	}
946 
947 	SLIST_INSERT_HEAD(&ch->buf_pool, buf, link);
948 }
949 
950 static inline struct spdk_accel_sequence *
951 accel_sequence_get(struct accel_io_channel *ch)
952 {
953 	struct spdk_accel_sequence *seq;
954 
955 	seq = SLIST_FIRST(&ch->seq_pool);
956 	if (spdk_unlikely(seq == NULL)) {
957 		accel_update_stats(ch, retry.sequence, 1);
958 		return NULL;
959 	}
960 
961 	SLIST_REMOVE_HEAD(&ch->seq_pool, link);
962 
963 	TAILQ_INIT(&seq->tasks);
964 	SLIST_INIT(&seq->bounce_bufs);
965 
966 	seq->ch = ch;
967 	seq->status = 0;
968 	seq->state = ACCEL_SEQUENCE_STATE_INIT;
969 	seq->in_process_sequence = false;
970 
971 	return seq;
972 }
973 
974 static inline void
975 accel_sequence_put(struct spdk_accel_sequence *seq)
976 {
977 	struct accel_io_channel *ch = seq->ch;
978 	struct accel_buffer *buf;
979 
980 	while (!SLIST_EMPTY(&seq->bounce_bufs)) {
981 		buf = SLIST_FIRST(&seq->bounce_bufs);
982 		SLIST_REMOVE_HEAD(&seq->bounce_bufs, link);
983 		accel_put_buf(seq->ch, buf);
984 	}
985 
986 	assert(TAILQ_EMPTY(&seq->tasks));
987 	seq->ch = NULL;
988 
989 	SLIST_INSERT_HEAD(&ch->seq_pool, seq, link);
990 }
991 
992 static void accel_sequence_task_cb(void *cb_arg, int status);
993 
994 static inline struct spdk_accel_task *
995 accel_sequence_get_task(struct accel_io_channel *ch, struct spdk_accel_sequence *seq,
996 			spdk_accel_step_cb cb_fn, void *cb_arg)
997 {
998 	struct spdk_accel_task *task;
999 
1000 	task = _get_task(ch, NULL, NULL);
1001 	if (spdk_unlikely(task == NULL)) {
1002 		return task;
1003 	}
1004 
1005 	task->step_cb_fn = cb_fn;
1006 	task->cb_arg = cb_arg;
1007 	task->seq = seq;
1008 
1009 	return task;
1010 }
1011 
1012 int
1013 spdk_accel_append_copy(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1014 		       struct iovec *dst_iovs, uint32_t dst_iovcnt,
1015 		       struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1016 		       struct iovec *src_iovs, uint32_t src_iovcnt,
1017 		       struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1018 		       int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
1019 {
1020 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1021 	struct spdk_accel_task *task;
1022 	struct spdk_accel_sequence *seq = *pseq;
1023 
1024 	if (seq == NULL) {
1025 		seq = accel_sequence_get(accel_ch);
1026 		if (spdk_unlikely(seq == NULL)) {
1027 			return -ENOMEM;
1028 		}
1029 	}
1030 
1031 	assert(seq->ch == accel_ch);
1032 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1033 	if (spdk_unlikely(task == NULL)) {
1034 		if (*pseq == NULL) {
1035 			accel_sequence_put(seq);
1036 		}
1037 
1038 		return -ENOMEM;
1039 	}
1040 
1041 	task->dst_domain = dst_domain;
1042 	task->dst_domain_ctx = dst_domain_ctx;
1043 	task->d.iovs = dst_iovs;
1044 	task->d.iovcnt = dst_iovcnt;
1045 	task->src_domain = src_domain;
1046 	task->src_domain_ctx = src_domain_ctx;
1047 	task->s.iovs = src_iovs;
1048 	task->s.iovcnt = src_iovcnt;
1049 	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
1050 	task->op_code = SPDK_ACCEL_OPC_COPY;
1051 	ACCEL_ASSIGN_FLAGS(task, flags);
1052 
1053 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1054 	*pseq = seq;
1055 
1056 	return 0;
1057 }
1058 
1059 int
1060 spdk_accel_append_fill(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1061 		       void *buf, uint64_t len,
1062 		       struct spdk_memory_domain *domain, void *domain_ctx, uint8_t pattern,
1063 		       int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
1064 {
1065 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1066 	struct spdk_accel_task *task;
1067 	struct spdk_accel_sequence *seq = *pseq;
1068 
1069 	if (seq == NULL) {
1070 		seq = accel_sequence_get(accel_ch);
1071 		if (spdk_unlikely(seq == NULL)) {
1072 			return -ENOMEM;
1073 		}
1074 	}
1075 
1076 	assert(seq->ch == accel_ch);
1077 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1078 	if (spdk_unlikely(task == NULL)) {
1079 		if (*pseq == NULL) {
1080 			accel_sequence_put(seq);
1081 		}
1082 
1083 		return -ENOMEM;
1084 	}
1085 
1086 	memset(&task->fill_pattern, pattern, sizeof(uint64_t));
1087 
1088 	task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
1089 	if (spdk_unlikely(!task->aux)) {
1090 		SPDK_ERRLOG("Fatal problem, aux data was not allocated\n");
1091 		if (*pseq == NULL) {
1092 			accel_sequence_put((seq));
1093 		}
1094 		STAILQ_INSERT_HEAD(&task->accel_ch->task_pool, task, link);
1095 		task->seq = NULL;
1096 		assert(0);
1097 		return -ENOMEM;
1098 	}
1099 	SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
1100 	task->has_aux = true;
1101 
1102 	task->d.iovs = &task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
1103 	task->d.iovs[0].iov_base = buf;
1104 	task->d.iovs[0].iov_len = len;
1105 	task->d.iovcnt = 1;
1106 	task->nbytes = len;
1107 	task->src_domain = NULL;
1108 	task->dst_domain = domain;
1109 	task->dst_domain_ctx = domain_ctx;
1110 	task->op_code = SPDK_ACCEL_OPC_FILL;
1111 	ACCEL_ASSIGN_FLAGS(task, flags);
1112 
1113 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1114 	*pseq = seq;
1115 
1116 	return 0;
1117 }
1118 
1119 int
1120 spdk_accel_append_decompress(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1121 			     struct iovec *dst_iovs, size_t dst_iovcnt,
1122 			     struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1123 			     struct iovec *src_iovs, size_t src_iovcnt,
1124 			     struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1125 			     int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
1126 {
1127 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1128 	struct spdk_accel_task *task;
1129 	struct spdk_accel_sequence *seq = *pseq;
1130 
1131 	if (seq == NULL) {
1132 		seq = accel_sequence_get(accel_ch);
1133 		if (spdk_unlikely(seq == NULL)) {
1134 			return -ENOMEM;
1135 		}
1136 	}
1137 
1138 	assert(seq->ch == accel_ch);
1139 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1140 	if (spdk_unlikely(task == NULL)) {
1141 		if (*pseq == NULL) {
1142 			accel_sequence_put(seq);
1143 		}
1144 
1145 		return -ENOMEM;
1146 	}
1147 
1148 	/* TODO: support output_size for chaining */
1149 	task->output_size = NULL;
1150 	task->dst_domain = dst_domain;
1151 	task->dst_domain_ctx = dst_domain_ctx;
1152 	task->d.iovs = dst_iovs;
1153 	task->d.iovcnt = dst_iovcnt;
1154 	task->src_domain = src_domain;
1155 	task->src_domain_ctx = src_domain_ctx;
1156 	task->s.iovs = src_iovs;
1157 	task->s.iovcnt = src_iovcnt;
1158 	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
1159 	task->op_code = SPDK_ACCEL_OPC_DECOMPRESS;
1160 	ACCEL_ASSIGN_FLAGS(task, flags);
1161 
1162 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1163 	*pseq = seq;
1164 
1165 	return 0;
1166 }
1167 
1168 int
1169 spdk_accel_append_encrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1170 			  struct spdk_accel_crypto_key *key,
1171 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
1172 			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1173 			  struct iovec *src_iovs, uint32_t src_iovcnt,
1174 			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1175 			  uint64_t iv, uint32_t block_size, int flags,
1176 			  spdk_accel_step_cb cb_fn, void *cb_arg)
1177 {
1178 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1179 	struct spdk_accel_task *task;
1180 	struct spdk_accel_sequence *seq = *pseq;
1181 
1182 	assert(dst_iovs && dst_iovcnt && src_iovs && src_iovcnt && key && block_size);
1183 
1184 	if (seq == NULL) {
1185 		seq = accel_sequence_get(accel_ch);
1186 		if (spdk_unlikely(seq == NULL)) {
1187 			return -ENOMEM;
1188 		}
1189 	}
1190 
1191 	assert(seq->ch == accel_ch);
1192 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1193 	if (spdk_unlikely(task == NULL)) {
1194 		if (*pseq == NULL) {
1195 			accel_sequence_put(seq);
1196 		}
1197 
1198 		return -ENOMEM;
1199 	}
1200 
1201 	task->crypto_key = key;
1202 	task->src_domain = src_domain;
1203 	task->src_domain_ctx = src_domain_ctx;
1204 	task->s.iovs = src_iovs;
1205 	task->s.iovcnt = src_iovcnt;
1206 	task->dst_domain = dst_domain;
1207 	task->dst_domain_ctx = dst_domain_ctx;
1208 	task->d.iovs = dst_iovs;
1209 	task->d.iovcnt = dst_iovcnt;
1210 	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
1211 	task->iv = iv;
1212 	task->block_size = block_size;
1213 	task->op_code = SPDK_ACCEL_OPC_ENCRYPT;
1214 	ACCEL_ASSIGN_FLAGS(task, flags);
1215 
1216 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1217 	*pseq = seq;
1218 
1219 	return 0;
1220 }
1221 
1222 int
1223 spdk_accel_append_decrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1224 			  struct spdk_accel_crypto_key *key,
1225 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
1226 			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1227 			  struct iovec *src_iovs, uint32_t src_iovcnt,
1228 			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1229 			  uint64_t iv, uint32_t block_size, int flags,
1230 			  spdk_accel_step_cb cb_fn, void *cb_arg)
1231 {
1232 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1233 	struct spdk_accel_task *task;
1234 	struct spdk_accel_sequence *seq = *pseq;
1235 
1236 	assert(dst_iovs && dst_iovcnt && src_iovs && src_iovcnt && key && block_size);
1237 
1238 	if (seq == NULL) {
1239 		seq = accel_sequence_get(accel_ch);
1240 		if (spdk_unlikely(seq == NULL)) {
1241 			return -ENOMEM;
1242 		}
1243 	}
1244 
1245 	assert(seq->ch == accel_ch);
1246 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1247 	if (spdk_unlikely(task == NULL)) {
1248 		if (*pseq == NULL) {
1249 			accel_sequence_put(seq);
1250 		}
1251 
1252 		return -ENOMEM;
1253 	}
1254 
1255 	task->crypto_key = key;
1256 	task->src_domain = src_domain;
1257 	task->src_domain_ctx = src_domain_ctx;
1258 	task->s.iovs = src_iovs;
1259 	task->s.iovcnt = src_iovcnt;
1260 	task->dst_domain = dst_domain;
1261 	task->dst_domain_ctx = dst_domain_ctx;
1262 	task->d.iovs = dst_iovs;
1263 	task->d.iovcnt = dst_iovcnt;
1264 	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
1265 	task->iv = iv;
1266 	task->block_size = block_size;
1267 	task->op_code = SPDK_ACCEL_OPC_DECRYPT;
1268 	ACCEL_ASSIGN_FLAGS(task, flags);
1269 
1270 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1271 	*pseq = seq;
1272 
1273 	return 0;
1274 }
1275 
1276 int
1277 spdk_accel_append_crc32c(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1278 			 uint32_t *dst, struct iovec *iovs, uint32_t iovcnt,
1279 			 struct spdk_memory_domain *domain, void *domain_ctx,
1280 			 uint32_t seed, spdk_accel_step_cb cb_fn, void *cb_arg)
1281 {
1282 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1283 	struct spdk_accel_task *task;
1284 	struct spdk_accel_sequence *seq = *pseq;
1285 
1286 	if (seq == NULL) {
1287 		seq = accel_sequence_get(accel_ch);
1288 		if (spdk_unlikely(seq == NULL)) {
1289 			return -ENOMEM;
1290 		}
1291 	}
1292 
1293 	assert(seq->ch == accel_ch);
1294 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1295 	if (spdk_unlikely(task == NULL)) {
1296 		if (*pseq == NULL) {
1297 			accel_sequence_put(seq);
1298 		}
1299 
1300 		return -ENOMEM;
1301 	}
1302 
1303 	task->s.iovs = iovs;
1304 	task->s.iovcnt = iovcnt;
1305 	task->src_domain = domain;
1306 	task->src_domain_ctx = domain_ctx;
1307 	task->nbytes = accel_get_iovlen(iovs, iovcnt);
1308 	task->crc_dst = dst;
1309 	task->seed = seed;
1310 	task->op_code = SPDK_ACCEL_OPC_CRC32C;
1311 	task->dst_domain = NULL;
1312 
1313 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1314 	*pseq = seq;
1315 
1316 	return 0;
1317 }
1318 
1319 int
1320 spdk_accel_get_buf(struct spdk_io_channel *ch, uint64_t len, void **buf,
1321 		   struct spdk_memory_domain **domain, void **domain_ctx)
1322 {
1323 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1324 	struct accel_buffer *accel_buf;
1325 
1326 	accel_buf = accel_get_buf(accel_ch, len);
1327 	if (spdk_unlikely(accel_buf == NULL)) {
1328 		return -ENOMEM;
1329 	}
1330 
1331 	accel_buf->ch = accel_ch;
1332 
1333 	/* We always return the same pointer and identify the buffers through domain_ctx */
1334 	*buf = ACCEL_BUFFER_BASE;
1335 	*domain_ctx = accel_buf;
1336 	*domain = g_accel_domain;
1337 
1338 	return 0;
1339 }
1340 
1341 void
1342 spdk_accel_put_buf(struct spdk_io_channel *ch, void *buf,
1343 		   struct spdk_memory_domain *domain, void *domain_ctx)
1344 {
1345 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1346 	struct accel_buffer *accel_buf = domain_ctx;
1347 
1348 	assert(domain == g_accel_domain);
1349 	assert(buf == ACCEL_BUFFER_BASE);
1350 
1351 	accel_put_buf(accel_ch, accel_buf);
1352 }
1353 
/*
 * Remove a finished task from its sequence, recycle the task (and its aux
 * data) back into the channel pools, and then invoke the per-step callback
 * registered when the task was appended.  The callback is stashed in locals
 * first because the task object may be reused by other submissions as soon
 * as it is back in the pool.
 */
static void
accel_sequence_complete_task(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	struct accel_io_channel *ch = seq->ch;
	spdk_accel_step_cb cb_fn;
	void *cb_arg;

	TAILQ_REMOVE(&seq->tasks, task, seq_link);
	/* Save the callback before the task is recycled below. */
	cb_fn = task->step_cb_fn;
	cb_arg = task->cb_arg;
	task->seq = NULL;
	if (task->has_aux) {
		SLIST_INSERT_HEAD(&ch->task_aux_data_pool, task->aux, link);
		task->aux = NULL;
		task->has_aux = false;
	}
	STAILQ_INSERT_HEAD(&ch->task_pool, task, link);
	if (cb_fn != NULL) {
		cb_fn(cb_arg);
	}
}
1375 
1376 static void
1377 accel_sequence_complete_tasks(struct spdk_accel_sequence *seq)
1378 {
1379 	struct spdk_accel_task *task;
1380 
1381 	while (!TAILQ_EMPTY(&seq->tasks)) {
1382 		task = TAILQ_FIRST(&seq->tasks);
1383 		accel_sequence_complete_task(seq, task);
1384 	}
1385 }
1386 
/*
 * Finish a sequence: update statistics, fire the per-step callbacks of any
 * remaining tasks, invoke the user's completion callback with the final
 * status, and recycle the sequence object.  The sequence must not be touched
 * after this returns.
 */
static void
accel_sequence_complete(struct spdk_accel_sequence *seq)
{
	SPDK_DEBUGLOG(accel, "Completed sequence: %p with status: %d\n", seq, seq->status);

	accel_update_stats(seq->ch, sequence_executed, 1);
	if (spdk_unlikely(seq->status != 0)) {
		accel_update_stats(seq->ch, sequence_failed, 1);
	}

	/* First notify all users that appended operations to this sequence */
	accel_sequence_complete_tasks(seq);

	/* Then notify the user that finished the sequence */
	seq->cb_fn(seq->cb_arg, seq->status);

	accel_sequence_put(seq);
}
1405 
1406 static void
1407 accel_update_virt_iov(struct iovec *diov, struct iovec *siov, struct accel_buffer *accel_buf)
1408 {
1409 	uintptr_t offset;
1410 
1411 	offset = (uintptr_t)siov->iov_base & ACCEL_BUFFER_OFFSET_MASK;
1412 	assert(offset < accel_buf->len);
1413 
1414 	diov->iov_base = (char *)accel_buf->buf + offset;
1415 	diov->iov_len = siov->iov_len;
1416 }
1417 
/*
 * After real memory has been attached to an accel_buffer, rewrite every task
 * in the sequence that referenced that (virtual) buffer so its iovs point at
 * the actual memory; offsets encoded in the virtual address are preserved by
 * accel_update_virt_iov().  Clearing src/dst_domain marks the side resolved.
 */
static void
accel_sequence_set_virtbuf(struct spdk_accel_sequence *seq, struct accel_buffer *buf)
{
	struct spdk_accel_task *task;
	struct iovec *iov;

	/* Now that we've allocated the actual data buffer for this accel_buffer, update all tasks
	 * in a sequence that were using it.
	 */
	TAILQ_FOREACH(task, &seq->tasks, seq_link) {
		if (task->src_domain == g_accel_domain && task->src_domain_ctx == buf) {
			if (!task->has_aux) {
				/* NOTE(review): only guarded by the assert below; presumably
				 * the aux pool is sized so this cannot run dry - confirm. */
				task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
				assert(task->aux && "Can't allocate aux data structure");
				task->has_aux = true;
				SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
			}

			/* Replace the task's iov with the translated one held in aux. */
			iov = &task->aux->iovs[SPDK_ACCEL_AXU_IOV_VIRT_SRC];
			assert(task->s.iovcnt == 1);
			accel_update_virt_iov(iov, &task->s.iovs[0], buf);
			task->src_domain = NULL;
			task->s.iovs = iov;
		}
		if (task->dst_domain == g_accel_domain && task->dst_domain_ctx == buf) {
			if (!task->has_aux) {
				task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
				assert(task->aux && "Can't allocate aux data structure");
				task->has_aux = true;
				SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
			}

			iov = &task->aux->iovs[SPDK_ACCEL_AXU_IOV_VIRT_DST];
			assert(task->d.iovcnt == 1);
			accel_update_virt_iov(iov, &task->d.iovs[0], buf);
			task->dst_domain = NULL;
			task->d.iovs = iov;
		}
	}
}
1458 
1459 static void accel_process_sequence(struct spdk_accel_sequence *seq);
1460 
1461 static void
1462 accel_iobuf_get_virtbuf_cb(struct spdk_iobuf_entry *entry, void *buf)
1463 {
1464 	struct accel_buffer *accel_buf;
1465 
1466 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1467 
1468 	assert(accel_buf->seq != NULL);
1469 	assert(accel_buf->buf == NULL);
1470 	accel_buf->buf = buf;
1471 
1472 	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
1473 	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
1474 	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
1475 	accel_process_sequence(accel_buf->seq);
1476 }
1477 
1478 static bool
1479 accel_sequence_alloc_buf(struct spdk_accel_sequence *seq, struct accel_buffer *buf,
1480 			 spdk_iobuf_get_cb cb_fn)
1481 {
1482 	struct accel_io_channel *ch = seq->ch;
1483 
1484 	assert(buf->seq == NULL);
1485 
1486 	buf->seq = seq;
1487 
1488 	/* Buffer might be already allocated by memory domain translation. */
1489 	if (buf->buf) {
1490 		return true;
1491 	}
1492 
1493 	buf->buf = spdk_iobuf_get(&ch->iobuf, buf->len, &buf->iobuf, cb_fn);
1494 	if (spdk_unlikely(buf->buf == NULL)) {
1495 		accel_update_stats(ch, retry.iobuf, 1);
1496 		return false;
1497 	}
1498 
1499 	return true;
1500 }
1501 
/*
 * Ensure any accel-domain (virtual) buffers used by this task are backed by
 * real memory.  Returns false when an iobuf allocation has to wait; the
 * sequence resumes from accel_iobuf_get_virtbuf_cb in that case.  Note that
 * resolving the src side may also clear task->dst_domain (when both sides
 * reference the same buffer, accel_sequence_set_virtbuf rewrites both), so
 * the dst check below must re-read the live field rather than a cached copy.
 */
static bool
accel_sequence_check_virtbuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	/* If a task doesn't have dst/src (e.g. fill, crc32), its dst/src domain should be set to
	 * NULL */
	if (task->src_domain == g_accel_domain) {
		if (!accel_sequence_alloc_buf(seq, task->src_domain_ctx,
					      accel_iobuf_get_virtbuf_cb)) {
			return false;
		}

		accel_sequence_set_virtbuf(seq, task->src_domain_ctx);
	}

	if (task->dst_domain == g_accel_domain) {
		if (!accel_sequence_alloc_buf(seq, task->dst_domain_ctx,
					      accel_iobuf_get_virtbuf_cb)) {
			return false;
		}

		accel_sequence_set_virtbuf(seq, task->dst_domain_ctx);
	}

	return true;
}
1527 
1528 static void
1529 accel_sequence_get_buf_cb(struct spdk_iobuf_entry *entry, void *buf)
1530 {
1531 	struct accel_buffer *accel_buf;
1532 
1533 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1534 
1535 	assert(accel_buf->seq != NULL);
1536 	assert(accel_buf->buf == NULL);
1537 	accel_buf->buf = buf;
1538 
1539 	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
1540 	accel_buf->cb_fn(accel_buf->seq, accel_buf->cb_ctx);
1541 }
1542 
1543 bool
1544 spdk_accel_alloc_sequence_buf(struct spdk_accel_sequence *seq, void *buf,
1545 			      struct spdk_memory_domain *domain, void *domain_ctx,
1546 			      spdk_accel_sequence_get_buf_cb cb_fn, void *cb_ctx)
1547 {
1548 	struct accel_buffer *accel_buf = domain_ctx;
1549 
1550 	assert(domain == g_accel_domain);
1551 	accel_buf->cb_fn = cb_fn;
1552 	accel_buf->cb_ctx = cb_ctx;
1553 
1554 	if (!accel_sequence_alloc_buf(seq, accel_buf, accel_sequence_get_buf_cb)) {
1555 		return false;
1556 	}
1557 
1558 	accel_sequence_set_virtbuf(seq, accel_buf);
1559 
1560 	return true;
1561 }
1562 
1563 struct spdk_accel_task *
1564 spdk_accel_sequence_first_task(struct spdk_accel_sequence *seq)
1565 {
1566 	return TAILQ_FIRST(&seq->tasks);
1567 }
1568 
1569 struct spdk_accel_task *
1570 spdk_accel_sequence_next_task(struct spdk_accel_task *task)
1571 {
1572 	return TAILQ_NEXT(task, seq_link);
1573 }
1574 
1575 static inline void
1576 accel_set_bounce_buffer(struct spdk_accel_bounce_buffer *bounce, struct iovec **iovs,
1577 			uint32_t *iovcnt, struct spdk_memory_domain **domain, void **domain_ctx,
1578 			struct accel_buffer *buf)
1579 {
1580 	bounce->orig_iovs = *iovs;
1581 	bounce->orig_iovcnt = *iovcnt;
1582 	bounce->orig_domain = *domain;
1583 	bounce->orig_domain_ctx = *domain_ctx;
1584 	bounce->iov.iov_base = buf->buf;
1585 	bounce->iov.iov_len = buf->len;
1586 
1587 	*iovs = &bounce->iov;
1588 	*iovcnt = 1;
1589 	*domain = NULL;
1590 }
1591 
1592 static void
1593 accel_iobuf_get_src_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
1594 {
1595 	struct spdk_accel_task *task;
1596 	struct accel_buffer *accel_buf;
1597 
1598 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1599 	assert(accel_buf->buf == NULL);
1600 	accel_buf->buf = buf;
1601 
1602 	task = TAILQ_FIRST(&accel_buf->seq->tasks);
1603 	assert(task != NULL);
1604 
1605 	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
1606 	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
1607 	assert(task->aux);
1608 	assert(task->has_aux);
1609 	accel_set_bounce_buffer(&task->aux->bounce.s, &task->s.iovs, &task->s.iovcnt, &task->src_domain,
1610 				&task->src_domain_ctx, accel_buf);
1611 	accel_process_sequence(accel_buf->seq);
1612 }
1613 
1614 static void
1615 accel_iobuf_get_dst_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
1616 {
1617 	struct spdk_accel_task *task;
1618 	struct accel_buffer *accel_buf;
1619 
1620 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1621 	assert(accel_buf->buf == NULL);
1622 	accel_buf->buf = buf;
1623 
1624 	task = TAILQ_FIRST(&accel_buf->seq->tasks);
1625 	assert(task != NULL);
1626 
1627 	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
1628 	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
1629 	assert(task->aux);
1630 	assert(task->has_aux);
1631 	accel_set_bounce_buffer(&task->aux->bounce.d, &task->d.iovs, &task->d.iovcnt, &task->dst_domain,
1632 				&task->dst_domain_ctx, accel_buf);
1633 	accel_process_sequence(accel_buf->seq);
1634 }
1635 
/*
 * Allocate bounce buffers for a task whose data lives in a remote memory
 * domain that the executing module cannot access directly.  Returns 0 on
 * success, -EAGAIN when waiting for iobuf memory (the sequence is resumed
 * from the iobuf callback), or -ENOMEM when no buffer descriptor is
 * available.  Bounce buffers are linked on seq->bounce_bufs and released
 * when the sequence is recycled.
 */
static int
accel_sequence_check_bouncebuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	struct accel_buffer *buf;

	if (task->src_domain != NULL) {
		/* By the time we're here, accel buffers should have been allocated */
		assert(task->src_domain != g_accel_domain);

		if (!task->has_aux) {
			task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
			if (spdk_unlikely(!task->aux)) {
				SPDK_ERRLOG("Can't allocate aux data structure\n");
				assert(0);
				return -EAGAIN;
			}
			task->has_aux = true;
			SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
		}
		/* The bounce buffer must cover the whole source payload. */
		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->s.iovs, task->s.iovcnt));
		if (buf == NULL) {
			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
			return -ENOMEM;
		}

		/* Track the buffer on the sequence so it's freed even on failure. */
		SLIST_INSERT_HEAD(&seq->bounce_bufs, buf, link);
		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_src_bounce_cb)) {
			/* iobuf exhausted; the callback redirects the iovs and resumes. */
			return -EAGAIN;
		}

		/* Save the original iovs/domain and point the task at the bounce iov. */
		accel_set_bounce_buffer(&task->aux->bounce.s, &task->s.iovs, &task->s.iovcnt,
					&task->src_domain, &task->src_domain_ctx, buf);
	}

	if (task->dst_domain != NULL) {
		/* By the time we're here, accel buffers should have been allocated */
		assert(task->dst_domain != g_accel_domain);

		if (!task->has_aux) {
			task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
			if (spdk_unlikely(!task->aux)) {
				SPDK_ERRLOG("Can't allocate aux data structure\n");
				assert(0);
				return -EAGAIN;
			}
			task->has_aux = true;
			SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
		}
		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->d.iovs, task->d.iovcnt));
		if (buf == NULL) {
			/* The src buffer will be released when a sequence is completed */
			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
			return -ENOMEM;
		}

		SLIST_INSERT_HEAD(&seq->bounce_bufs, buf, link);
		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_dst_bounce_cb)) {
			return -EAGAIN;
		}

		accel_set_bounce_buffer(&task->aux->bounce.d, &task->d.iovs, &task->d.iovcnt,
					&task->dst_domain, &task->dst_domain_ctx, buf);
	}

	return 0;
}
1702 
1703 static void
1704 accel_task_pull_data_cb(void *ctx, int status)
1705 {
1706 	struct spdk_accel_sequence *seq = ctx;
1707 
1708 	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
1709 	if (spdk_likely(status == 0)) {
1710 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
1711 	} else {
1712 		accel_sequence_set_fail(seq, status);
1713 	}
1714 
1715 	accel_process_sequence(seq);
1716 }
1717 
1718 static void
1719 accel_task_pull_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1720 {
1721 	int rc;
1722 
1723 	assert(task->has_aux);
1724 	assert(task->aux);
1725 	assert(task->aux->bounce.s.orig_iovs != NULL);
1726 	assert(task->aux->bounce.s.orig_domain != NULL);
1727 	assert(task->aux->bounce.s.orig_domain != g_accel_domain);
1728 	assert(!g_modules_opc[task->op_code].supports_memory_domains);
1729 
1730 	rc = spdk_memory_domain_pull_data(task->aux->bounce.s.orig_domain,
1731 					  task->aux->bounce.s.orig_domain_ctx,
1732 					  task->aux->bounce.s.orig_iovs, task->aux->bounce.s.orig_iovcnt,
1733 					  task->s.iovs, task->s.iovcnt,
1734 					  accel_task_pull_data_cb, seq);
1735 	if (spdk_unlikely(rc != 0)) {
1736 		SPDK_ERRLOG("Failed to pull data from memory domain: %s, rc: %d\n",
1737 			    spdk_memory_domain_get_dma_device_id(task->aux->bounce.s.orig_domain), rc);
1738 		accel_sequence_set_fail(seq, rc);
1739 	}
1740 }
1741 
1742 static void
1743 accel_task_push_data_cb(void *ctx, int status)
1744 {
1745 	struct spdk_accel_sequence *seq = ctx;
1746 
1747 	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
1748 	if (spdk_likely(status == 0)) {
1749 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
1750 	} else {
1751 		accel_sequence_set_fail(seq, status);
1752 	}
1753 
1754 	accel_process_sequence(seq);
1755 }
1756 
1757 static void
1758 accel_task_push_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1759 {
1760 	int rc;
1761 
1762 	assert(task->has_aux);
1763 	assert(task->aux);
1764 	assert(task->aux->bounce.d.orig_iovs != NULL);
1765 	assert(task->aux->bounce.d.orig_domain != NULL);
1766 	assert(task->aux->bounce.d.orig_domain != g_accel_domain);
1767 	assert(!g_modules_opc[task->op_code].supports_memory_domains);
1768 
1769 	rc = spdk_memory_domain_push_data(task->aux->bounce.d.orig_domain,
1770 					  task->aux->bounce.d.orig_domain_ctx,
1771 					  task->aux->bounce.d.orig_iovs, task->aux->bounce.d.orig_iovcnt,
1772 					  task->d.iovs, task->d.iovcnt,
1773 					  accel_task_push_data_cb, seq);
1774 	if (spdk_unlikely(rc != 0)) {
1775 		SPDK_ERRLOG("Failed to push data to memory domain: %s, rc: %d\n",
1776 			    spdk_memory_domain_get_dma_device_id(task->aux->bounce.s.orig_domain), rc);
1777 		accel_sequence_set_fail(seq, rc);
1778 	}
1779 }
1780 
/*
 * Drive a sequence through its state machine until it completes, fails, or
 * must wait for an asynchronous event (buffer allocation, data movement, or
 * task execution).  Asynchronous callbacks re-enter this function to resume;
 * the loop exits whenever an iteration leaves the state unchanged (i.e. we
 * parked in an AWAIT_* state).
 */
static void
accel_process_sequence(struct spdk_accel_sequence *seq)
{
	struct accel_io_channel *accel_ch = seq->ch;
	struct spdk_accel_task *task;
	enum accel_sequence_state state;
	int rc;

	/* Prevent recursive calls to this function */
	if (spdk_unlikely(seq->in_process_sequence)) {
		return;
	}
	seq->in_process_sequence = true;

	task = TAILQ_FIRST(&seq->tasks);
	do {
		state = seq->state;
		switch (state) {
		case ACCEL_SEQUENCE_STATE_INIT:
			if (g_accel_driver != NULL) {
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS);
				break;
			}
		/* Fall through */
		case ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF:
			/* Move to AWAIT first: the iobuf callback asserts this state. */
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
			if (!accel_sequence_check_virtbuf(seq, task)) {
				/* We couldn't allocate a buffer, wait until one is available */
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
		/* Fall through */
		case ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF:
			/* If a module supports memory domains, we don't need to allocate bounce
			 * buffers */
			if (g_modules_opc[task->op_code].supports_memory_domains) {
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
			rc = accel_sequence_check_bouncebuf(seq, task);
			if (spdk_unlikely(rc != 0)) {
				/* We couldn't allocate a buffer, wait until one is available */
				if (rc == -EAGAIN) {
					break;
				}
				accel_sequence_set_fail(seq, rc);
				break;
			}
			/* A src bounce buffer was installed; pull the remote data into it
			 * before the task can execute. */
			if (task->has_aux && task->s.iovs == &task->aux->bounce.s.iov) {
				assert(task->aux->bounce.s.orig_iovs);
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PULL_DATA);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
		/* Fall through */
		case ACCEL_SEQUENCE_STATE_EXEC_TASK:
			SPDK_DEBUGLOG(accel, "Executing %s operation, sequence: %p\n",
				      g_opcode_strings[task->op_code], seq);

			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_TASK);
			rc = accel_submit_task(accel_ch, task);
			if (spdk_unlikely(rc != 0)) {
				SPDK_ERRLOG("Failed to submit %s operation, sequence: %p\n",
					    g_opcode_strings[task->op_code], seq);
				accel_sequence_set_fail(seq, rc);
			}
			break;
		case ACCEL_SEQUENCE_STATE_PULL_DATA:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
			accel_task_pull_data(seq, task);
			break;
		case ACCEL_SEQUENCE_STATE_COMPLETE_TASK:
			/* If output landed in a dst bounce buffer, push it back to the
			 * user's memory domain before moving on. */
			if (task->has_aux && task->d.iovs == &task->aux->bounce.d.iov) {
				assert(task->aux->bounce.d.orig_iovs);
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PUSH_DATA);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
			break;
		case ACCEL_SEQUENCE_STATE_PUSH_DATA:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
			accel_task_push_data(seq, task);
			break;
		case ACCEL_SEQUENCE_STATE_NEXT_TASK:
			accel_sequence_complete_task(seq, task);
			/* Check if there are any remaining tasks */
			task = TAILQ_FIRST(&seq->tasks);
			if (task == NULL) {
				/* Immediately return here to make sure we don't touch the sequence
				 * after it's completed */
				accel_sequence_complete(seq);
				return;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_INIT);
			break;
		case ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS:
			assert(!TAILQ_EMPTY(&seq->tasks));

			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS);
			rc = g_accel_driver->execute_sequence(accel_ch->driver_channel, seq);
			if (spdk_unlikely(rc != 0)) {
				SPDK_ERRLOG("Failed to execute sequence: %p using driver: %s\n",
					    seq, g_accel_driver->name);
				accel_sequence_set_fail(seq, rc);
			}
			break;
		case ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS:
			/* Get the task again, as the driver might have completed some tasks
			 * synchronously */
			task = TAILQ_FIRST(&seq->tasks);
			if (task == NULL) {
				/* Immediately return here to make sure we don't touch the sequence
				 * after it's completed */
				accel_sequence_complete(seq);
				return;
			}
			/* We don't want to execute the next task through the driver, so we
			 * explicitly omit the INIT state here */
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
			break;
		case ACCEL_SEQUENCE_STATE_ERROR:
			/* Immediately return here to make sure we don't touch the sequence
			 * after it's completed */
			assert(seq->status != 0);
			accel_sequence_complete(seq);
			return;
		case ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF:
		case ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF:
		case ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA:
		case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
		case ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA:
		case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS:
			/* Parked on an asynchronous event; the loop below exits because
			 * the state did not change during this iteration. */
			break;
		default:
			assert(0 && "bad state");
			break;
		}
	} while (seq->state != state);

	seq->in_process_sequence = false;
}
1923 
/* Completion callback for a single task executed as part of a sequence.
 * Advances the sequence's state machine according to the state the sequence
 * was waiting in when the task finished.
 */
static void
accel_sequence_task_cb(void *cb_arg, int status)
{
	struct spdk_accel_sequence *seq = cb_arg;
	struct spdk_accel_task *task = TAILQ_FIRST(&seq->tasks);

	switch (seq->state) {
	case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
		/* Task was submitted through a module: mark it complete (or failed) and
		 * resume processing the sequence immediately */
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_COMPLETE_TASK);
		if (spdk_unlikely(status != 0)) {
			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p\n",
				    g_opcode_strings[task->op_code], seq);
			accel_sequence_set_fail(seq, status);
		}

		accel_process_sequence(seq);
		break;
	case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS:
		assert(g_accel_driver != NULL);
		/* Immediately remove the task from the outstanding list to make sure the next call
		 * to spdk_accel_sequence_first_task() doesn't return it */
		accel_sequence_complete_task(seq, task);
		if (spdk_unlikely(status != 0)) {
			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p through "
				    "driver: %s\n", g_opcode_strings[task->op_code], seq,
				    g_accel_driver->name);
			/* Update status without using accel_sequence_set_fail() to avoid changing
			 * seq's state to ERROR until driver calls spdk_accel_sequence_continue() */
			seq->status = status;
		}
		break;
	default:
		assert(0 && "bad state");
		break;
	}
}
1960 
1961 void
1962 spdk_accel_sequence_continue(struct spdk_accel_sequence *seq)
1963 {
1964 	assert(g_accel_driver != NULL);
1965 	assert(seq->state == ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS);
1966 
1967 	if (spdk_likely(seq->status == 0)) {
1968 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS);
1969 	} else {
1970 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
1971 	}
1972 
1973 	accel_process_sequence(seq);
1974 }
1975 
/* Check whether two iovec arrays are identical.  This is intentionally a
 * byte-for-byte comparison of the descriptors (same base pointers and
 * lengths), not a comparison of the memory they point at. */
static bool
accel_compare_iovs(struct iovec *iova, uint32_t iovacnt, struct iovec *iovb, uint32_t iovbcnt)
{
	return iovacnt == iovbcnt &&
	       memcmp(iova, iovb, sizeof(iova[0]) * iovacnt) == 0;
}
1986 
/* Try to make @task write directly into the destination of @next (a copy that
 * follows it in the sequence), which makes the copy itself removable.
 * Returns true if @task's buffers were rewired, false when the two tasks
 * can't be linked this way.
 */
static bool
accel_task_set_dstbuf(struct spdk_accel_task *task, struct spdk_accel_task *next)
{
	struct spdk_accel_task *prev;

	switch (task->op_code) {
	case SPDK_ACCEL_OPC_DECOMPRESS:
	case SPDK_ACCEL_OPC_FILL:
	case SPDK_ACCEL_OPC_ENCRYPT:
	case SPDK_ACCEL_OPC_DECRYPT:
		/* Only possible when the copy reads exactly what this task writes */
		if (task->dst_domain != next->src_domain) {
			return false;
		}
		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
					next->s.iovs, next->s.iovcnt)) {
			return false;
		}
		/* Redirect output straight into the copy's destination */
		task->d.iovs = next->d.iovs;
		task->d.iovcnt = next->d.iovcnt;
		task->dst_domain = next->dst_domain;
		task->dst_domain_ctx = next->dst_domain_ctx;
		break;
	case SPDK_ACCEL_OPC_CRC32C:
		/* crc32 is special, because it doesn't have a dst buffer */
		if (task->src_domain != next->src_domain) {
			return false;
		}
		if (!accel_compare_iovs(task->s.iovs, task->s.iovcnt,
					next->s.iovs, next->s.iovcnt)) {
			return false;
		}
		/* We can only change crc32's buffer if we can change previous task's buffer */
		prev = TAILQ_PREV(task, accel_sequence_tasks, seq_link);
		if (prev == NULL) {
			return false;
		}
		if (!accel_task_set_dstbuf(prev, next)) {
			return false;
		}
		/* Read from the copy's destination, which prev now writes into */
		task->s.iovs = next->d.iovs;
		task->s.iovcnt = next->d.iovcnt;
		task->src_domain = next->dst_domain;
		task->src_domain_ctx = next->dst_domain_ctx;
		break;
	default:
		return false;
	}

	return true;
}
2037 
/* Try to merge two adjacent tasks, @task and @next (@task precedes @next in
 * the sequence), to eliminate an unnecessary copy.  A merged-away task is
 * completed immediately.  When @next itself is removed, *next_task is
 * advanced past it so the caller's TAILQ_FOREACH_SAFE() loop stays valid.
 */
static void
accel_sequence_merge_tasks(struct spdk_accel_sequence *seq, struct spdk_accel_task *task,
			   struct spdk_accel_task **next_task)
{
	struct spdk_accel_task *next = *next_task;

	switch (task->op_code) {
	case SPDK_ACCEL_OPC_COPY:
		/* We only allow changing src of operations that actually have a src, e.g. we never
		 * do it for fill.  Theoretically, it is possible, but we'd have to be careful to
		 * change the src of the operation after fill (which in turn could also be a fill).
		 * So, for the sake of simplicity, skip this type of operations for now.
		 */
		if (next->op_code != SPDK_ACCEL_OPC_DECOMPRESS &&
		    next->op_code != SPDK_ACCEL_OPC_COPY &&
		    next->op_code != SPDK_ACCEL_OPC_ENCRYPT &&
		    next->op_code != SPDK_ACCEL_OPC_DECRYPT &&
		    next->op_code != SPDK_ACCEL_OPC_CRC32C) {
			break;
		}
		/* The copy's destination must be exactly what the next task reads */
		if (task->dst_domain != next->src_domain) {
			break;
		}
		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
					next->s.iovs, next->s.iovcnt)) {
			break;
		}
		/* Make the next task read from the copy's source and drop the copy */
		next->s.iovs = task->s.iovs;
		next->s.iovcnt = task->s.iovcnt;
		next->src_domain = task->src_domain;
		next->src_domain_ctx = task->src_domain_ctx;
		accel_sequence_complete_task(seq, task);
		break;
	case SPDK_ACCEL_OPC_DECOMPRESS:
	case SPDK_ACCEL_OPC_FILL:
	case SPDK_ACCEL_OPC_ENCRYPT:
	case SPDK_ACCEL_OPC_DECRYPT:
	case SPDK_ACCEL_OPC_CRC32C:
		/* We can only merge tasks when one of them is a copy */
		if (next->op_code != SPDK_ACCEL_OPC_COPY) {
			break;
		}
		if (!accel_task_set_dstbuf(task, next)) {
			break;
		}
		/* We're removing next_task from the tasks queue, so we need to update its pointer,
		 * so that the TAILQ_FOREACH_SAFE() loop below works correctly */
		*next_task = TAILQ_NEXT(next, seq_link);
		accel_sequence_complete_task(seq, next);
		break;
	default:
		assert(0 && "bad opcode");
		break;
	}
}
2093 
2094 void
2095 spdk_accel_sequence_finish(struct spdk_accel_sequence *seq,
2096 			   spdk_accel_completion_cb cb_fn, void *cb_arg)
2097 {
2098 	struct spdk_accel_task *task, *next;
2099 
2100 	/* Try to remove any copy operations if possible */
2101 	TAILQ_FOREACH_SAFE(task, &seq->tasks, seq_link, next) {
2102 		if (next == NULL) {
2103 			break;
2104 		}
2105 		accel_sequence_merge_tasks(seq, task, &next);
2106 	}
2107 
2108 	seq->cb_fn = cb_fn;
2109 	seq->cb_arg = cb_arg;
2110 
2111 	accel_process_sequence(seq);
2112 }
2113 
2114 void
2115 spdk_accel_sequence_reverse(struct spdk_accel_sequence *seq)
2116 {
2117 	struct accel_sequence_tasks tasks = TAILQ_HEAD_INITIALIZER(tasks);
2118 	struct spdk_accel_task *task;
2119 
2120 	TAILQ_SWAP(&tasks, &seq->tasks, spdk_accel_task, seq_link);
2121 
2122 	while (!TAILQ_EMPTY(&tasks)) {
2123 		task = TAILQ_FIRST(&tasks);
2124 		TAILQ_REMOVE(&tasks, task, seq_link);
2125 		TAILQ_INSERT_HEAD(&seq->tasks, task, seq_link);
2126 	}
2127 }
2128 
2129 void
2130 spdk_accel_sequence_abort(struct spdk_accel_sequence *seq)
2131 {
2132 	if (seq == NULL) {
2133 		return;
2134 	}
2135 
2136 	accel_sequence_complete_tasks(seq);
2137 	accel_sequence_put(seq);
2138 }
2139 
2140 struct spdk_memory_domain *
2141 spdk_accel_get_memory_domain(void)
2142 {
2143 	return g_accel_domain;
2144 }
2145 
2146 static struct spdk_accel_module_if *
2147 _module_find_by_name(const char *name)
2148 {
2149 	struct spdk_accel_module_if *accel_module = NULL;
2150 
2151 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
2152 		if (strcmp(name, accel_module->name) == 0) {
2153 			break;
2154 		}
2155 	}
2156 
2157 	return accel_module;
2158 }
2159 
2160 static inline struct spdk_accel_crypto_key *
2161 _accel_crypto_key_get(const char *name)
2162 {
2163 	struct spdk_accel_crypto_key *key;
2164 
2165 	assert(spdk_spin_held(&g_keyring_spin));
2166 
2167 	TAILQ_FOREACH(key, &g_keyring, link) {
2168 		if (strcmp(name, key->param.key_name) == 0) {
2169 			return key;
2170 		}
2171 	}
2172 
2173 	return NULL;
2174 }
2175 
2176 static void
2177 accel_crypto_key_free_mem(struct spdk_accel_crypto_key *key)
2178 {
2179 	if (key->param.hex_key) {
2180 		spdk_memset_s(key->param.hex_key, key->key_size * 2, 0, key->key_size * 2);
2181 		free(key->param.hex_key);
2182 	}
2183 	if (key->param.hex_key2) {
2184 		spdk_memset_s(key->param.hex_key2, key->key2_size * 2, 0, key->key2_size * 2);
2185 		free(key->param.hex_key2);
2186 	}
2187 	free(key->param.tweak_mode);
2188 	free(key->param.key_name);
2189 	free(key->param.cipher);
2190 	if (key->key) {
2191 		spdk_memset_s(key->key, key->key_size, 0, key->key_size);
2192 		free(key->key);
2193 	}
2194 	if (key->key2) {
2195 		spdk_memset_s(key->key2, key->key2_size, 0, key->key2_size);
2196 		free(key->key2);
2197 	}
2198 	free(key);
2199 }
2200 
2201 static void
2202 accel_crypto_key_destroy_unsafe(struct spdk_accel_crypto_key *key)
2203 {
2204 	assert(key->module_if);
2205 	assert(key->module_if->crypto_key_deinit);
2206 
2207 	key->module_if->crypto_key_deinit(key);
2208 	accel_crypto_key_free_mem(key);
2209 }
2210 
2211 /*
2212  * This function mitigates a timing side channel which could be caused by using strcmp()
2213  * Please refer to chapter "Mitigating Information Leakage Based on Variable Timing" in
2214  * the article [1] for more details
2215  * [1] https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/secure-coding/mitigate-timing-side-channel-crypto-implementation.html
2216  */
2217 static bool
2218 accel_aes_xts_keys_equal(const char *k1, size_t k1_len, const char *k2, size_t k2_len)
2219 {
2220 	size_t i;
2221 	volatile size_t x = k1_len ^ k2_len;
2222 
2223 	for (i = 0; ((i < k1_len) & (i < k2_len)); i++) {
2224 		x |= k1[i] ^ k2[i];
2225 	}
2226 
2227 	return x == 0;
2228 }
2229 
/* Printable names for crypto tweak modes, indexed by the
 * spdk_accel_crypto_tweak_mode enum value */
static const char *g_tweak_modes[] = {
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA] = "SIMPLE_LBA",
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_JOIN_NEG_LBA_WITH_LBA] = "JOIN_NEG_LBA_WITH_LBA",
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_INCR_512_FULL_LBA] = "INCR_512_FULL_LBA",
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_INCR_512_UPPER_LBA] = "INCR_512_UPPER_LBA",
};
2236 
/* Printable names for supported ciphers, indexed by the spdk_accel_cipher
 * enum value */
static const char *g_ciphers[] = {
	[SPDK_ACCEL_CIPHER_AES_CBC] = "AES_CBC",
	[SPDK_ACCEL_CIPHER_AES_XTS] = "AES_XTS",
};
2241 
2242 int
2243 spdk_accel_crypto_key_create(const struct spdk_accel_crypto_key_create_param *param)
2244 {
2245 	struct spdk_accel_module_if *module;
2246 	struct spdk_accel_crypto_key *key;
2247 	size_t hex_key_size, hex_key2_size;
2248 	bool found = false;
2249 	size_t i;
2250 	int rc;
2251 
2252 	if (!param || !param->hex_key || !param->cipher || !param->key_name) {
2253 		return -EINVAL;
2254 	}
2255 
2256 	if (g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module != g_modules_opc[SPDK_ACCEL_OPC_DECRYPT].module) {
2257 		/* hardly ever possible, but let's check and warn the user */
2258 		SPDK_ERRLOG("Different accel modules are used for encryption and decryption\n");
2259 	}
2260 	module = g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module;
2261 
2262 	if (!module) {
2263 		SPDK_ERRLOG("No accel module found assigned for crypto operation\n");
2264 		return -ENOENT;
2265 	}
2266 
2267 	if (!module->crypto_key_init || !module->crypto_supports_cipher) {
2268 		SPDK_ERRLOG("Module %s doesn't support crypto operations\n", module->name);
2269 		return -ENOTSUP;
2270 	}
2271 
2272 	key = calloc(1, sizeof(*key));
2273 	if (!key) {
2274 		return -ENOMEM;
2275 	}
2276 
2277 	key->param.key_name = strdup(param->key_name);
2278 	if (!key->param.key_name) {
2279 		rc = -ENOMEM;
2280 		goto error;
2281 	}
2282 
2283 	for (i = 0; i < SPDK_COUNTOF(g_ciphers); ++i) {
2284 		assert(g_ciphers[i]);
2285 
2286 		if (strncmp(param->cipher, g_ciphers[i], strlen(g_ciphers[i])) == 0) {
2287 			key->cipher = i;
2288 			found = true;
2289 			break;
2290 		}
2291 	}
2292 
2293 	if (!found) {
2294 		SPDK_ERRLOG("Failed to parse cipher\n");
2295 		rc = -EINVAL;
2296 		goto error;
2297 	}
2298 
2299 	key->param.cipher = strdup(param->cipher);
2300 	if (!key->param.cipher) {
2301 		rc = -ENOMEM;
2302 		goto error;
2303 	}
2304 
2305 	hex_key_size = strnlen(param->hex_key, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
2306 	if (hex_key_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
2307 		SPDK_ERRLOG("key1 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
2308 		rc = -EINVAL;
2309 		goto error;
2310 	}
2311 
2312 	if (hex_key_size == 0) {
2313 		SPDK_ERRLOG("key1 size cannot be 0\n");
2314 		rc = -EINVAL;
2315 		goto error;
2316 	}
2317 
2318 	key->param.hex_key = strdup(param->hex_key);
2319 	if (!key->param.hex_key) {
2320 		rc = -ENOMEM;
2321 		goto error;
2322 	}
2323 
2324 	key->key_size = hex_key_size / 2;
2325 	key->key = spdk_unhexlify(key->param.hex_key);
2326 	if (!key->key) {
2327 		SPDK_ERRLOG("Failed to unhexlify key1\n");
2328 		rc = -EINVAL;
2329 		goto error;
2330 	}
2331 
2332 	if (param->hex_key2) {
2333 		hex_key2_size = strnlen(param->hex_key2, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
2334 		if (hex_key2_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
2335 			SPDK_ERRLOG("key2 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
2336 			rc = -EINVAL;
2337 			goto error;
2338 		}
2339 
2340 		if (hex_key2_size == 0) {
2341 			SPDK_ERRLOG("key2 size cannot be 0\n");
2342 			rc = -EINVAL;
2343 			goto error;
2344 		}
2345 
2346 		key->param.hex_key2 = strdup(param->hex_key2);
2347 		if (!key->param.hex_key2) {
2348 			rc = -ENOMEM;
2349 			goto error;
2350 		}
2351 
2352 		key->key2_size = hex_key2_size / 2;
2353 		key->key2 = spdk_unhexlify(key->param.hex_key2);
2354 		if (!key->key2) {
2355 			SPDK_ERRLOG("Failed to unhexlify key2\n");
2356 			rc = -EINVAL;
2357 			goto error;
2358 		}
2359 	}
2360 
2361 	key->tweak_mode = ACCEL_CRYPTO_TWEAK_MODE_DEFAULT;
2362 	if (param->tweak_mode) {
2363 		found = false;
2364 
2365 		key->param.tweak_mode = strdup(param->tweak_mode);
2366 		if (!key->param.tweak_mode) {
2367 			rc = -ENOMEM;
2368 			goto error;
2369 		}
2370 
2371 		for (i = 0; i < SPDK_COUNTOF(g_tweak_modes); ++i) {
2372 			assert(g_tweak_modes[i]);
2373 
2374 			if (strncmp(param->tweak_mode, g_tweak_modes[i], strlen(g_tweak_modes[i])) == 0) {
2375 				key->tweak_mode = i;
2376 				found = true;
2377 				break;
2378 			}
2379 		}
2380 
2381 		if (!found) {
2382 			SPDK_ERRLOG("Failed to parse tweak mode\n");
2383 			rc = -EINVAL;
2384 			goto error;
2385 		}
2386 	}
2387 
2388 	if ((!module->crypto_supports_tweak_mode && key->tweak_mode != ACCEL_CRYPTO_TWEAK_MODE_DEFAULT) ||
2389 	    (module->crypto_supports_tweak_mode && !module->crypto_supports_tweak_mode(key->tweak_mode))) {
2390 		SPDK_ERRLOG("Module %s doesn't support %s tweak mode\n", module->name,
2391 			    g_tweak_modes[key->tweak_mode]);
2392 		rc = -EINVAL;
2393 		goto error;
2394 	}
2395 
2396 	if (!module->crypto_supports_cipher(key->cipher, key->key_size)) {
2397 		SPDK_ERRLOG("Module %s doesn't support %s cipher with %zu key size\n", module->name,
2398 			    g_ciphers[key->cipher], key->key_size);
2399 		rc = -EINVAL;
2400 		goto error;
2401 	}
2402 
2403 	if (key->cipher == SPDK_ACCEL_CIPHER_AES_XTS) {
2404 		if (!key->key2) {
2405 			SPDK_ERRLOG("%s key2 is missing\n", g_ciphers[key->cipher]);
2406 			rc = -EINVAL;
2407 			goto error;
2408 		}
2409 
2410 		if (key->key_size != key->key2_size) {
2411 			SPDK_ERRLOG("%s key size %zu is not equal to key2 size %zu\n", g_ciphers[key->cipher],
2412 				    key->key_size,
2413 				    key->key2_size);
2414 			rc = -EINVAL;
2415 			goto error;
2416 		}
2417 
2418 		if (accel_aes_xts_keys_equal(key->key, key->key_size, key->key2, key->key2_size)) {
2419 			SPDK_ERRLOG("%s identical keys are not secure\n", g_ciphers[key->cipher]);
2420 			rc = -EINVAL;
2421 			goto error;
2422 		}
2423 	}
2424 
2425 	if (key->cipher == SPDK_ACCEL_CIPHER_AES_CBC) {
2426 		if (key->key2_size) {
2427 			SPDK_ERRLOG("%s doesn't use key2\n", g_ciphers[key->cipher]);
2428 			rc = -EINVAL;
2429 			goto error;
2430 		}
2431 	}
2432 
2433 	key->module_if = module;
2434 
2435 	spdk_spin_lock(&g_keyring_spin);
2436 	if (_accel_crypto_key_get(param->key_name)) {
2437 		rc = -EEXIST;
2438 	} else {
2439 		rc = module->crypto_key_init(key);
2440 		if (rc) {
2441 			SPDK_ERRLOG("Module %s failed to initialize crypto key\n", module->name);
2442 		} else {
2443 			TAILQ_INSERT_TAIL(&g_keyring, key, link);
2444 		}
2445 	}
2446 	spdk_spin_unlock(&g_keyring_spin);
2447 
2448 	if (rc) {
2449 		goto error;
2450 	}
2451 
2452 	return 0;
2453 
2454 error:
2455 	accel_crypto_key_free_mem(key);
2456 	return rc;
2457 }
2458 
2459 int
2460 spdk_accel_crypto_key_destroy(struct spdk_accel_crypto_key *key)
2461 {
2462 	if (!key || !key->module_if) {
2463 		return -EINVAL;
2464 	}
2465 
2466 	spdk_spin_lock(&g_keyring_spin);
2467 	if (!_accel_crypto_key_get(key->param.key_name)) {
2468 		spdk_spin_unlock(&g_keyring_spin);
2469 		return -ENOENT;
2470 	}
2471 	TAILQ_REMOVE(&g_keyring, key, link);
2472 	spdk_spin_unlock(&g_keyring_spin);
2473 
2474 	accel_crypto_key_destroy_unsafe(key);
2475 
2476 	return 0;
2477 }
2478 
2479 struct spdk_accel_crypto_key *
2480 spdk_accel_crypto_key_get(const char *name)
2481 {
2482 	struct spdk_accel_crypto_key *key;
2483 
2484 	spdk_spin_lock(&g_keyring_spin);
2485 	key = _accel_crypto_key_get(name);
2486 	spdk_spin_unlock(&g_keyring_spin);
2487 
2488 	return key;
2489 }
2490 
2491 /* Helper function when accel modules register with the framework. */
2492 void
2493 spdk_accel_module_list_add(struct spdk_accel_module_if *accel_module)
2494 {
2495 	struct spdk_accel_module_if *tmp;
2496 
2497 	if (_module_find_by_name(accel_module->name)) {
2498 		SPDK_NOTICELOG("Module %s already registered\n", accel_module->name);
2499 		assert(false);
2500 		return;
2501 	}
2502 
2503 	TAILQ_FOREACH(tmp, &spdk_accel_module_list, tailq) {
2504 		if (accel_module->priority < tmp->priority) {
2505 			break;
2506 		}
2507 	}
2508 
2509 	if (tmp != NULL) {
2510 		TAILQ_INSERT_BEFORE(tmp, accel_module, tailq);
2511 	} else {
2512 		TAILQ_INSERT_TAIL(&spdk_accel_module_list, accel_module, tailq);
2513 	}
2514 }
2515 
2516 /* Framework level channel create callback. */
2517 static int
2518 accel_create_channel(void *io_device, void *ctx_buf)
2519 {
2520 	struct accel_io_channel	*accel_ch = ctx_buf;
2521 	struct spdk_accel_task *accel_task;
2522 	struct spdk_accel_task_aux_data *accel_task_aux;
2523 	struct spdk_accel_sequence *seq;
2524 	struct accel_buffer *buf;
2525 	size_t task_size_aligned;
2526 	uint8_t *task_mem;
2527 	uint32_t i = 0, j;
2528 	int rc;
2529 
2530 	task_size_aligned = SPDK_ALIGN_CEIL(g_max_accel_module_size, SPDK_CACHE_LINE_SIZE);
2531 	accel_ch->task_pool_base = aligned_alloc(SPDK_CACHE_LINE_SIZE,
2532 				   g_opts.task_count * task_size_aligned);
2533 	if (!accel_ch->task_pool_base) {
2534 		return -ENOMEM;
2535 	}
2536 	memset(accel_ch->task_pool_base, 0, g_opts.task_count * task_size_aligned);
2537 
2538 	accel_ch->seq_pool_base = aligned_alloc(SPDK_CACHE_LINE_SIZE,
2539 						g_opts.sequence_count * sizeof(struct spdk_accel_sequence));
2540 	if (accel_ch->seq_pool_base == NULL) {
2541 		goto err;
2542 	}
2543 	memset(accel_ch->seq_pool_base, 0, g_opts.sequence_count * sizeof(struct spdk_accel_sequence));
2544 
2545 	accel_ch->task_aux_data_base = calloc(g_opts.task_count, sizeof(struct spdk_accel_task_aux_data));
2546 	if (accel_ch->task_aux_data_base == NULL) {
2547 		goto err;
2548 	}
2549 
2550 	accel_ch->buf_pool_base = calloc(g_opts.buf_count, sizeof(struct accel_buffer));
2551 	if (accel_ch->buf_pool_base == NULL) {
2552 		goto err;
2553 	}
2554 
2555 	STAILQ_INIT(&accel_ch->task_pool);
2556 	SLIST_INIT(&accel_ch->task_aux_data_pool);
2557 	SLIST_INIT(&accel_ch->seq_pool);
2558 	SLIST_INIT(&accel_ch->buf_pool);
2559 
2560 	task_mem = accel_ch->task_pool_base;
2561 	for (i = 0; i < g_opts.task_count; i++) {
2562 		accel_task = (struct spdk_accel_task *)task_mem;
2563 		accel_task->aux = NULL;
2564 		STAILQ_INSERT_TAIL(&accel_ch->task_pool, accel_task, link);
2565 		task_mem += task_size_aligned;
2566 		accel_task_aux = &accel_ch->task_aux_data_base[i];
2567 		SLIST_INSERT_HEAD(&accel_ch->task_aux_data_pool, accel_task_aux, link);
2568 	}
2569 	for (i = 0; i < g_opts.sequence_count; i++) {
2570 		seq = &accel_ch->seq_pool_base[i];
2571 		SLIST_INSERT_HEAD(&accel_ch->seq_pool, seq, link);
2572 	}
2573 	for (i = 0; i < g_opts.buf_count; i++) {
2574 		buf = &accel_ch->buf_pool_base[i];
2575 		SLIST_INSERT_HEAD(&accel_ch->buf_pool, buf, link);
2576 	}
2577 
2578 	/* Assign modules and get IO channels for each */
2579 	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
2580 		accel_ch->module_ch[i] = g_modules_opc[i].module->get_io_channel();
2581 		/* This can happen if idxd runs out of channels. */
2582 		if (accel_ch->module_ch[i] == NULL) {
2583 			SPDK_ERRLOG("Module %s failed to get io channel\n", g_modules_opc[i].module->name);
2584 			goto err;
2585 		}
2586 	}
2587 
2588 	if (g_accel_driver != NULL) {
2589 		accel_ch->driver_channel = g_accel_driver->get_io_channel();
2590 		if (accel_ch->driver_channel == NULL) {
2591 			SPDK_ERRLOG("Failed to get driver's IO channel\n");
2592 			goto err;
2593 		}
2594 	}
2595 
2596 	rc = spdk_iobuf_channel_init(&accel_ch->iobuf, "accel", g_opts.small_cache_size,
2597 				     g_opts.large_cache_size);
2598 	if (rc != 0) {
2599 		SPDK_ERRLOG("Failed to initialize iobuf accel channel\n");
2600 		goto err;
2601 	}
2602 
2603 	return 0;
2604 err:
2605 	if (accel_ch->driver_channel != NULL) {
2606 		spdk_put_io_channel(accel_ch->driver_channel);
2607 	}
2608 	for (j = 0; j < i; j++) {
2609 		spdk_put_io_channel(accel_ch->module_ch[j]);
2610 	}
2611 	free(accel_ch->task_pool_base);
2612 	free(accel_ch->task_aux_data_base);
2613 	free(accel_ch->seq_pool_base);
2614 	free(accel_ch->buf_pool_base);
2615 
2616 	return -ENOMEM;
2617 }
2618 
2619 static void
2620 accel_add_stats(struct accel_stats *total, struct accel_stats *stats)
2621 {
2622 	int i;
2623 
2624 	total->sequence_executed += stats->sequence_executed;
2625 	total->sequence_failed += stats->sequence_failed;
2626 	total->retry.task += stats->retry.task;
2627 	total->retry.sequence += stats->retry.sequence;
2628 	total->retry.iobuf += stats->retry.iobuf;
2629 	total->retry.bufdesc += stats->retry.bufdesc;
2630 	for (i = 0; i < SPDK_ACCEL_OPC_LAST; ++i) {
2631 		total->operations[i].executed += stats->operations[i].executed;
2632 		total->operations[i].failed += stats->operations[i].failed;
2633 		total->operations[i].num_bytes += stats->operations[i].num_bytes;
2634 	}
2635 }
2636 
2637 /* Framework level channel destroy callback. */
2638 static void
2639 accel_destroy_channel(void *io_device, void *ctx_buf)
2640 {
2641 	struct accel_io_channel	*accel_ch = ctx_buf;
2642 	int i;
2643 
2644 	spdk_iobuf_channel_fini(&accel_ch->iobuf);
2645 
2646 	if (accel_ch->driver_channel != NULL) {
2647 		spdk_put_io_channel(accel_ch->driver_channel);
2648 	}
2649 
2650 	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
2651 		assert(accel_ch->module_ch[i] != NULL);
2652 		spdk_put_io_channel(accel_ch->module_ch[i]);
2653 		accel_ch->module_ch[i] = NULL;
2654 	}
2655 
2656 	/* Update global stats to make sure channel's stats aren't lost after a channel is gone */
2657 	spdk_spin_lock(&g_stats_lock);
2658 	accel_add_stats(&g_stats, &accel_ch->stats);
2659 	spdk_spin_unlock(&g_stats_lock);
2660 
2661 	free(accel_ch->task_pool_base);
2662 	free(accel_ch->task_aux_data_base);
2663 	free(accel_ch->seq_pool_base);
2664 	free(accel_ch->buf_pool_base);
2665 }
2666 
/* Return an accel framework I/O channel for the calling thread.  The module
 * list's address is the io_device identifier registered in
 * spdk_accel_initialize(). */
struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(&spdk_accel_module_list);
}
2672 
2673 static int
2674 accel_module_initialize(void)
2675 {
2676 	struct spdk_accel_module_if *accel_module, *tmp_module;
2677 	int rc = 0, module_rc;
2678 
2679 	TAILQ_FOREACH_SAFE(accel_module, &spdk_accel_module_list, tailq, tmp_module) {
2680 		module_rc = accel_module->module_init();
2681 		if (module_rc) {
2682 			TAILQ_REMOVE(&spdk_accel_module_list, accel_module, tailq);
2683 			if (module_rc == -ENODEV) {
2684 				SPDK_NOTICELOG("No devices for module %s, skipping\n", accel_module->name);
2685 			} else if (!rc) {
2686 				SPDK_ERRLOG("Module %s initialization failed with %d\n", accel_module->name, module_rc);
2687 				rc = module_rc;
2688 			}
2689 			continue;
2690 		}
2691 
2692 		SPDK_DEBUGLOG(accel, "Module %s initialized.\n", accel_module->name);
2693 	}
2694 
2695 	return rc;
2696 }
2697 
2698 static void
2699 accel_module_init_opcode(enum spdk_accel_opcode opcode)
2700 {
2701 	struct accel_module *module = &g_modules_opc[opcode];
2702 	struct spdk_accel_module_if *module_if = module->module;
2703 
2704 	if (module_if->get_memory_domains != NULL) {
2705 		module->supports_memory_domains = module_if->get_memory_domains(NULL, 0) > 0;
2706 	}
2707 }
2708 
/* Translation callback for the accel memory domain.  Backs the virtual accel
 * buffer described by @src_domain_ctx (a struct accel_buffer) with a real
 * iobuf allocation and reports it as a single iovec in @result.
 *
 * Returns 0 on success, -ENOMEM when no iobuf buffer is currently available.
 */
static int
accel_memory_domain_translate(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			      struct spdk_memory_domain *dst_domain, struct spdk_memory_domain_translation_ctx *dst_domain_ctx,
			      void *addr, size_t len, struct spdk_memory_domain_translation_result *result)
{
	struct accel_buffer *buf = src_domain_ctx;

	SPDK_DEBUGLOG(accel, "translate addr %p, len %zu\n", addr, len);

	/* Only accel-domain -> system-domain translations of a buffer that hasn't
	 * been backed yet are expected here */
	assert(g_accel_domain == src_domain);
	assert(spdk_memory_domain_get_system_domain() == dst_domain);
	assert(buf->buf == NULL);
	assert(addr == ACCEL_BUFFER_BASE);
	assert(len == buf->len);

	buf->buf = spdk_iobuf_get(&buf->ch->iobuf, buf->len, NULL, NULL);
	if (spdk_unlikely(buf->buf == NULL)) {
		return -ENOMEM;
	}

	result->iov_count = 1;
	result->iov.iov_base = buf->buf;
	result->iov.iov_len = buf->len;
	SPDK_DEBUGLOG(accel, "translated addr %p\n", result->iov.iov_base);
	return 0;
}
2735 
/* Invalidation callback for the accel memory domain: returns the iobuf backing
 * the accel buffer described by @domain_ctx (a struct accel_buffer) to the
 * channel's iobuf cache. */
static void
accel_memory_domain_invalidate(struct spdk_memory_domain *domain, void *domain_ctx,
			       struct iovec *iov, uint32_t iovcnt)
{
	struct accel_buffer *buf = domain_ctx;

	SPDK_DEBUGLOG(accel, "invalidate addr %p, len %zu\n", iov[0].iov_base, iov[0].iov_len);

	/* The iovec must describe exactly the buffer handed out by the translate
	 * callback */
	assert(g_accel_domain == domain);
	assert(iovcnt == 1);
	assert(buf->buf != NULL);
	assert(iov[0].iov_base == buf->buf);
	assert(iov[0].iov_len == buf->len);

	spdk_iobuf_put(&buf->ch->iobuf, buf->buf, buf->len);
	buf->buf = NULL;
}
2753 
/* Initialize the accel framework: register the io_device, create the accel
 * memory domain, initialize every registered module (and the configured
 * driver, if any), then assign a module to each opcode, honoring any
 * user-requested overrides.  Returns 0 on success or a negative errno from
 * the first failing step. */
int
spdk_accel_initialize(void)
{
	enum spdk_accel_opcode op;
	struct spdk_accel_module_if *accel_module = NULL;
	int rc;

	/*
	 * We need a unique identifier for the accel framework, so use the
	 * spdk_accel_module_list address for this purpose.
	 */
	spdk_io_device_register(&spdk_accel_module_list, accel_create_channel, accel_destroy_channel,
				sizeof(struct accel_io_channel), "accel");

	spdk_spin_init(&g_keyring_spin);
	spdk_spin_init(&g_stats_lock);

	rc = spdk_memory_domain_create(&g_accel_domain, SPDK_DMA_DEVICE_TYPE_ACCEL, NULL,
				       "SPDK_ACCEL_DMA_DEVICE");
	if (rc != 0) {
		SPDK_ERRLOG("Failed to create accel memory domain\n");
		return rc;
	}

	/* Virtual accel buffers are materialized/released through these callbacks */
	spdk_memory_domain_set_translation(g_accel_domain, accel_memory_domain_translate);
	spdk_memory_domain_set_invalidate(g_accel_domain, accel_memory_domain_invalidate);

	g_modules_started = true;
	rc = accel_module_initialize();
	if (rc) {
		return rc;
	}

	if (g_accel_driver != NULL && g_accel_driver->init != NULL) {
		rc = g_accel_driver->init();
		if (rc != 0) {
			SPDK_ERRLOG("Failed to initialize driver %s: %s\n", g_accel_driver->name,
				    spdk_strerror(-rc));
			return rc;
		}
	}

	/* The module list is order by priority, with the highest priority modules being at the end
	 * of the list.  The software module should be somewhere at the beginning of the list,
	 * before all HW modules.
	 * NOTE: all opcodes must be supported by software in the event that no HW modules are
	 * initialized to support the operation.
	 */
	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
			if (accel_module->supports_opcode(op)) {
				/* Later (higher-priority) modules overwrite earlier assignments */
				g_modules_opc[op].module = accel_module;
				SPDK_DEBUGLOG(accel, "OPC 0x%x now assigned to %s\n", op, accel_module->name);
			}
		}

		if (accel_module->get_ctx_size != NULL) {
			g_max_accel_module_size = spdk_max(g_max_accel_module_size,
							   accel_module->get_ctx_size());
		}
	}

	/* Now lets check for overrides and apply all that exist */
	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
		if (g_modules_opc_override[op] != NULL) {
			accel_module = _module_find_by_name(g_modules_opc_override[op]);
			if (accel_module == NULL) {
				SPDK_ERRLOG("Invalid module name of %s\n", g_modules_opc_override[op]);
				return -EINVAL;
			}
			if (accel_module->supports_opcode(op) == false) {
				SPDK_ERRLOG("Module %s does not support op code %d\n", accel_module->name, op);
				return -EINVAL;
			}
			g_modules_opc[op].module = accel_module;
		}
	}

	if (g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module != g_modules_opc[SPDK_ACCEL_OPC_DECRYPT].module) {
		SPDK_ERRLOG("Different accel modules are assigned to encrypt and decrypt operations");
		return -EINVAL;
	}

	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
		assert(g_modules_opc[op].module != NULL);
		/* Record memory-domain support for the module chosen for this opcode */
		accel_module_init_opcode(op);
	}

	rc = spdk_iobuf_register_module("accel");
	if (rc != 0) {
		SPDK_ERRLOG("Failed to register accel iobuf module\n");
		return rc;
	}

	return 0;
}
2850 
2851 static void
2852 accel_module_finish_cb(void)
2853 {
2854 	spdk_accel_fini_cb cb_fn = g_fini_cb_fn;
2855 
2856 	cb_fn(g_fini_cb_arg);
2857 	g_fini_cb_fn = NULL;
2858 	g_fini_cb_arg = NULL;
2859 }
2860 
/* Emit an "accel_assign_opc" RPC entry mapping opcode @opc_str to module
 * @module_str, used when saving the current configuration as JSON. */
static void
accel_write_overridden_opc(struct spdk_json_write_ctx *w, const char *opc_str,
			   const char *module_str)
{
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "accel_assign_opc");
	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_string(w, "opname", opc_str);
	spdk_json_write_named_string(w, "module", module_str);
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);
}
2873 
/* Write the parameter fields of a crypto key shared by both the config replay
 * ("accel_crypto_key_create") and the informational dump.  The optional key2
 * and tweak_mode fields are omitted when not set on the key.
 */
static void
__accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
{
	spdk_json_write_named_string(w, "name", key->param.key_name);
	spdk_json_write_named_string(w, "cipher", key->param.cipher);
	spdk_json_write_named_string(w, "key", key->param.hex_key);
	if (key->param.hex_key2) {
		spdk_json_write_named_string(w, "key2", key->param.hex_key2);
	}

	if (key->param.tweak_mode) {
		spdk_json_write_named_string(w, "tweak_mode", key->param.tweak_mode);
	}
}
2888 
/* Dump a crypto key's parameters as a standalone JSON object (no RPC method
 * wrapper), used for informational output.
 */
void
_accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
{
	spdk_json_write_object_begin(w);
	__accel_crypto_key_dump_param(w, key);
	spdk_json_write_object_end(w);
}
2896 
/* Emit a complete "accel_crypto_key_create" RPC object for one key so the key
 * is recreated when the saved configuration is replayed.
 */
static void
_accel_crypto_key_write_config_json(struct spdk_json_write_ctx *w,
				    struct spdk_accel_crypto_key *key)
{
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "accel_crypto_key_create");
	spdk_json_write_named_object_begin(w, "params");
	__accel_crypto_key_dump_param(w, key);
	spdk_json_write_object_end(w); /* params */
	spdk_json_write_object_end(w);
}
2908 
/* Emit an "accel_set_options" RPC object capturing the current global options
 * (g_opts) so they are restored on config replay.
 */
static void
accel_write_options(struct spdk_json_write_ctx *w)
{
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "accel_set_options");
	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_uint32(w, "small_cache_size", g_opts.small_cache_size);
	spdk_json_write_named_uint32(w, "large_cache_size", g_opts.large_cache_size);
	spdk_json_write_named_uint32(w, "task_count", g_opts.task_count);
	spdk_json_write_named_uint32(w, "sequence_count", g_opts.sequence_count);
	spdk_json_write_named_uint32(w, "buf_count", g_opts.buf_count);
	spdk_json_write_object_end(w); /* params */
	spdk_json_write_object_end(w);
}
2923 
2924 static void
2925 _accel_crypto_keys_write_config_json(struct spdk_json_write_ctx *w, bool full_dump)
2926 {
2927 	struct spdk_accel_crypto_key *key;
2928 
2929 	spdk_spin_lock(&g_keyring_spin);
2930 	TAILQ_FOREACH(key, &g_keyring, link) {
2931 		if (full_dump) {
2932 			_accel_crypto_key_write_config_json(w, key);
2933 		} else {
2934 			_accel_crypto_key_dump_param(w, key);
2935 		}
2936 	}
2937 	spdk_spin_unlock(&g_keyring_spin);
2938 }
2939 
/* Dump all crypto keys' parameters (informational form, no RPC wrappers). */
void
_accel_crypto_keys_dump_param(struct spdk_json_write_ctx *w)
{
	_accel_crypto_keys_write_config_json(w, false);
}
2945 
2946 void
2947 spdk_accel_write_config_json(struct spdk_json_write_ctx *w)
2948 {
2949 	struct spdk_accel_module_if *accel_module;
2950 	int i;
2951 
2952 	spdk_json_write_array_begin(w);
2953 	accel_write_options(w);
2954 
2955 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
2956 		if (accel_module->write_config_json) {
2957 			accel_module->write_config_json(w);
2958 		}
2959 	}
2960 	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
2961 		if (g_modules_opc_override[i]) {
2962 			accel_write_overridden_opc(w, g_opcode_strings[i], g_modules_opc_override[i]);
2963 		}
2964 	}
2965 
2966 	_accel_crypto_keys_write_config_json(w, true);
2967 
2968 	spdk_json_write_array_end(w);
2969 }
2970 
2971 void
2972 spdk_accel_module_finish(void)
2973 {
2974 	if (!g_accel_module) {
2975 		g_accel_module = TAILQ_FIRST(&spdk_accel_module_list);
2976 	} else {
2977 		g_accel_module = TAILQ_NEXT(g_accel_module, tailq);
2978 	}
2979 
2980 	if (!g_accel_module) {
2981 		if (g_accel_driver != NULL && g_accel_driver->fini != NULL) {
2982 			g_accel_driver->fini();
2983 		}
2984 
2985 		spdk_spin_destroy(&g_keyring_spin);
2986 		spdk_spin_destroy(&g_stats_lock);
2987 		if (g_accel_domain) {
2988 			spdk_memory_domain_destroy(g_accel_domain);
2989 			g_accel_domain = NULL;
2990 		}
2991 		accel_module_finish_cb();
2992 		return;
2993 	}
2994 
2995 	if (g_accel_module->module_fini) {
2996 		spdk_thread_send_msg(spdk_get_thread(), g_accel_module->module_fini, NULL);
2997 	} else {
2998 		spdk_accel_module_finish();
2999 	}
3000 }
3001 
3002 static void
3003 accel_io_device_unregister_cb(void *io_device)
3004 {
3005 	struct spdk_accel_crypto_key *key, *key_tmp;
3006 	enum spdk_accel_opcode op;
3007 
3008 	spdk_spin_lock(&g_keyring_spin);
3009 	TAILQ_FOREACH_SAFE(key, &g_keyring, link, key_tmp) {
3010 		accel_crypto_key_destroy_unsafe(key);
3011 	}
3012 	spdk_spin_unlock(&g_keyring_spin);
3013 
3014 	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
3015 		if (g_modules_opc_override[op] != NULL) {
3016 			free(g_modules_opc_override[op]);
3017 			g_modules_opc_override[op] = NULL;
3018 		}
3019 		g_modules_opc[op].module = NULL;
3020 	}
3021 
3022 	spdk_accel_module_finish();
3023 }
3024 
/* Begin shutting down the accel framework.
 *
 * Saves the caller's completion callback, then unregisters the accel io_device;
 * the rest of the teardown continues asynchronously in
 * accel_io_device_unregister_cb() and cb_fn is invoked once all modules have
 * finished.
 *
 * \param cb_fn required completion callback.
 * \param cb_arg argument passed to cb_fn.
 */
void
spdk_accel_finish(spdk_accel_fini_cb cb_fn, void *cb_arg)
{
	assert(cb_fn != NULL);

	g_fini_cb_fn = cb_fn;
	g_fini_cb_arg = cb_arg;

	spdk_io_device_unregister(&spdk_accel_module_list, accel_io_device_unregister_cb);
}
3035 
3036 static struct spdk_accel_driver *
3037 accel_find_driver(const char *name)
3038 {
3039 	struct spdk_accel_driver *driver;
3040 
3041 	TAILQ_FOREACH(driver, &g_accel_drivers, tailq) {
3042 		if (strcmp(driver->name, name) == 0) {
3043 			return driver;
3044 		}
3045 	}
3046 
3047 	return NULL;
3048 }
3049 
3050 int
3051 spdk_accel_set_driver(const char *name)
3052 {
3053 	struct spdk_accel_driver *driver;
3054 
3055 	driver = accel_find_driver(name);
3056 	if (driver == NULL) {
3057 		SPDK_ERRLOG("Couldn't find driver named '%s'\n", name);
3058 		return -ENODEV;
3059 	}
3060 
3061 	g_accel_driver = driver;
3062 
3063 	return 0;
3064 }
3065 
3066 void
3067 spdk_accel_driver_register(struct spdk_accel_driver *driver)
3068 {
3069 	if (accel_find_driver(driver->name)) {
3070 		SPDK_ERRLOG("Driver named '%s' has already been registered\n", driver->name);
3071 		assert(0);
3072 		return;
3073 	}
3074 
3075 	TAILQ_INSERT_TAIL(&g_accel_drivers, driver, tailq);
3076 }
3077 
3078 int
3079 spdk_accel_set_opts(const struct spdk_accel_opts *opts)
3080 {
3081 	if (opts->size > sizeof(*opts)) {
3082 		return -EINVAL;
3083 	}
3084 
3085 	memcpy(&g_opts, opts, opts->size);
3086 
3087 	return 0;
3088 }
3089 
3090 void
3091 spdk_accel_get_opts(struct spdk_accel_opts *opts)
3092 {
3093 	size_t size = opts->size;
3094 
3095 	assert(size <= sizeof(*opts));
3096 
3097 	memcpy(opts, &g_opts, spdk_min(sizeof(*opts), size));
3098 	opts->size = size;
3099 }
3100 
/* Context for an asynchronous accel_get_stats() aggregation across all
 * channels. */
struct accel_get_stats_ctx {
	struct accel_stats	stats;		/* running aggregate of global + per-channel stats */
	accel_get_stats_cb	cb_fn;		/* user completion callback */
	void			*cb_arg;	/* argument passed to cb_fn */
};
3106 
/* Completion for the per-channel stats walk: deliver the aggregated stats to
 * the user and release the context.  status is ignored - the per-channel step
 * always continues with 0 (see accel_get_channel_stats).
 */
static void
accel_get_channel_stats_done(struct spdk_io_channel_iter *iter, int status)
{
	struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);

	ctx->cb_fn(&ctx->stats, ctx->cb_arg);
	free(ctx);
}
3115 
3116 static void
3117 accel_get_channel_stats(struct spdk_io_channel_iter *iter)
3118 {
3119 	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(iter);
3120 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
3121 	struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);
3122 
3123 	accel_add_stats(&ctx->stats, &accel_ch->stats);
3124 	spdk_for_each_channel_continue(iter, 0);
3125 }
3126 
3127 int
3128 accel_get_stats(accel_get_stats_cb cb_fn, void *cb_arg)
3129 {
3130 	struct accel_get_stats_ctx *ctx;
3131 
3132 	ctx = calloc(1, sizeof(*ctx));
3133 	if (ctx == NULL) {
3134 		return -ENOMEM;
3135 	}
3136 
3137 	spdk_spin_lock(&g_stats_lock);
3138 	accel_add_stats(&ctx->stats, &g_stats);
3139 	spdk_spin_unlock(&g_stats_lock);
3140 
3141 	ctx->cb_fn = cb_fn;
3142 	ctx->cb_arg = cb_arg;
3143 
3144 	spdk_for_each_channel(&spdk_accel_module_list, accel_get_channel_stats, ctx,
3145 			      accel_get_channel_stats_done);
3146 
3147 	return 0;
3148 }
3149 
/* Copy this channel's per-opcode stats into the caller's struct.
 *
 * The stats struct is ABI-versioned via the size argument: a field is only
 * written if it fits entirely within the first `size` bytes, so older callers
 * with a smaller struct are safe.
 *
 * \param ch accel io channel to read stats from.
 * \param opcode opcode whose counters are requested.
 * \param stats caller-provided output struct.
 * \param size caller's sizeof(*stats).
 */
void
spdk_accel_get_opcode_stats(struct spdk_io_channel *ch, enum spdk_accel_opcode opcode,
			    struct spdk_accel_opcode_stats *stats, size_t size)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);

/* True if `field` lies entirely within the caller's view of the struct. */
#define FIELD_OK(field) \
	offsetof(struct spdk_accel_opcode_stats, field) + sizeof(stats->field) <= size

/* Assign `value` to stats->field only when the caller's struct contains it. */
#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		stats->field = value; \
	}

	SET_FIELD(executed, accel_ch->stats.operations[opcode].executed);
	SET_FIELD(failed, accel_ch->stats.operations[opcode].failed);
	SET_FIELD(num_bytes, accel_ch->stats.operations[opcode].num_bytes);

#undef FIELD_OK
#undef SET_FIELD
}
3171 
3172 uint8_t
3173 spdk_accel_get_buf_align(enum spdk_accel_opcode opcode,
3174 			 const struct spdk_accel_operation_exec_ctx *ctx)
3175 {
3176 	struct spdk_accel_module_if *module = g_modules_opc[opcode].module;
3177 	struct spdk_accel_opcode_info modinfo = {}, drvinfo = {};
3178 
3179 	if (g_accel_driver != NULL && g_accel_driver->get_operation_info != NULL) {
3180 		g_accel_driver->get_operation_info(opcode, ctx, &drvinfo);
3181 	}
3182 
3183 	if (module->get_operation_info != NULL) {
3184 		module->get_operation_info(opcode, ctx, &modinfo);
3185 	}
3186 
3187 	/* If a driver is set, it'll execute most of the operations, while the rest will usually
3188 	 * fall back to accel_sw, which doesn't have any alignment requiremenets.  However, to be
3189 	 * extra safe, return the max(driver, module) if a driver delegates some operations to a
3190 	 * hardware module. */
3191 	return spdk_max(modinfo.required_alignment, drvinfo.required_alignment);
3192 }
3193 
3194 struct spdk_accel_module_if *
3195 spdk_accel_get_module(const char *name)
3196 {
3197 	struct spdk_accel_module_if *module;
3198 
3199 	TAILQ_FOREACH(module, &spdk_accel_module_list, tailq) {
3200 		if (strcmp(module->name, name) == 0) {
3201 			return module;
3202 		}
3203 	}
3204 
3205 	return NULL;
3206 }
3207 
3208 SPDK_LOG_REGISTER_COMPONENT(accel)
3209