/* xref: /spdk/lib/accel/accel.c (revision dbcc38f096e5336dcc4ab5e60b10202db51c0a38) */
/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2020 Intel Corporation.
 *   Copyright (c) 2022, 2023 NVIDIA CORPORATION & AFFILIATES.
 *   All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/accel_module.h"

#include "accel_internal.h"

#include "spdk/dma.h"
#include "spdk/env.h"
#include "spdk/likely.h"
#include "spdk/log.h"
#include "spdk/thread.h"
#include "spdk/json.h"
#include "spdk/crc32.h"
#include "spdk/util.h"
#include "spdk/hexlify.h"
#include "spdk/string.h"

/* Accelerator Framework: The following provides a top level
 * generic API for the accelerator functions defined here. Modules,
 * such as the one in /module/accel/ioat, supply the implementation
 * with the exception of the pure software implementation contained
 * later in this file.
 */

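/*
 * Illustrative sketch (compiled out; the example_* names are hypothetical):
 * the rough shape of a module that plugs into this framework.  A real module
 * fills in the spdk_accel_module_if callbacks declared in spdk/accel_module.h
 * and registers itself so the opcode-to-module mapping below can find it.
 */
#if 0
static bool
example_accel_supports_opcode(enum spdk_accel_opcode opc)
{
	/* Advertise only the opcodes this module can actually execute */
	return opc == SPDK_ACCEL_OPC_COPY;
}

static struct spdk_accel_module_if g_example_module = {
	.name			= "example",
	.supports_opcode	= example_accel_supports_opcode,
	/* ... module_init, get_ctx_size, get_io_channel, submit_tasks, ... */
};
SPDK_ACCEL_MODULE_REGISTER(example, &g_example_module)
#endif
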
#define ALIGN_4K			0x1000
#define MAX_TASKS_PER_CHANNEL		0x800
#define ACCEL_SMALL_CACHE_SIZE		128
#define ACCEL_LARGE_CACHE_SIZE		16
/* Set MSB, so we don't return NULL pointers as buffers */
#define ACCEL_BUFFER_BASE		((void *)(1ull << 63))
#define ACCEL_BUFFER_OFFSET_MASK	((uintptr_t)ACCEL_BUFFER_BASE - 1)

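/*
 * Illustrative sketch (compiled out): how an accel "virtual" buffer address
 * encodes an offset.  Because ACCEL_BUFFER_BASE sets bit 63, such a pointer
 * is never NULL, and masking with ACCEL_BUFFER_OFFSET_MASK recovers the
 * offset into the real buffer once it is allocated (see
 * accel_update_virt_iov() below).
 */
#if 0
static void
example_virt_addr_roundtrip(void)
{
	void *virt = (char *)ACCEL_BUFFER_BASE + 0x200;
	uintptr_t offset = (uintptr_t)virt & ACCEL_BUFFER_OFFSET_MASK;

	assert(offset == 0x200);
}
#endif
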
#define ACCEL_CRYPTO_TWEAK_MODE_DEFAULT	SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA

struct accel_module {
	struct spdk_accel_module_if	*module;
	bool				supports_memory_domains;
};

/* Largest context size for all accel modules */
static size_t g_max_accel_module_size = sizeof(struct spdk_accel_task);

static struct spdk_accel_module_if *g_accel_module = NULL;
static spdk_accel_fini_cb g_fini_cb_fn = NULL;
static void *g_fini_cb_arg = NULL;
static bool g_modules_started = false;
static struct spdk_memory_domain *g_accel_domain;

/* Global list of registered accelerator modules */
static TAILQ_HEAD(, spdk_accel_module_if) spdk_accel_module_list =
	TAILQ_HEAD_INITIALIZER(spdk_accel_module_list);

/* Crypto keyring */
static TAILQ_HEAD(, spdk_accel_crypto_key) g_keyring = TAILQ_HEAD_INITIALIZER(g_keyring);
static struct spdk_spinlock g_keyring_spin;

/* Global array mapping capabilities to modules */
static struct accel_module g_modules_opc[SPDK_ACCEL_OPC_LAST] = {};
static char *g_modules_opc_override[SPDK_ACCEL_OPC_LAST] = {};
TAILQ_HEAD(, spdk_accel_driver) g_accel_drivers = TAILQ_HEAD_INITIALIZER(g_accel_drivers);
static struct spdk_accel_driver *g_accel_driver;
static struct spdk_accel_opts g_opts = {
	.small_cache_size = ACCEL_SMALL_CACHE_SIZE,
	.large_cache_size = ACCEL_LARGE_CACHE_SIZE,
	.task_count = MAX_TASKS_PER_CHANNEL,
	.sequence_count = MAX_TASKS_PER_CHANNEL,
	.buf_count = MAX_TASKS_PER_CHANNEL,
};
static struct accel_stats g_stats;
static struct spdk_spinlock g_stats_lock;

static const char *g_opcode_strings[SPDK_ACCEL_OPC_LAST] = {
	"copy", "fill", "dualcast", "compare", "crc32c", "copy_crc32c",
	"compress", "decompress", "encrypt", "decrypt", "xor"
};

enum accel_sequence_state {
	ACCEL_SEQUENCE_STATE_INIT,
	ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF,
	ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF,
	ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF,
	ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF,
	ACCEL_SEQUENCE_STATE_PULL_DATA,
	ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA,
	ACCEL_SEQUENCE_STATE_EXEC_TASK,
	ACCEL_SEQUENCE_STATE_AWAIT_TASK,
	ACCEL_SEQUENCE_STATE_COMPLETE_TASK,
	ACCEL_SEQUENCE_STATE_NEXT_TASK,
	ACCEL_SEQUENCE_STATE_PUSH_DATA,
	ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA,
	ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS,
	ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS,
	ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS,
	ACCEL_SEQUENCE_STATE_ERROR,
	ACCEL_SEQUENCE_STATE_MAX,
};

static const char *g_seq_states[]
__attribute__((unused)) = {
	[ACCEL_SEQUENCE_STATE_INIT] = "init",
	[ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF] = "check-virtbuf",
	[ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF] = "await-virtbuf",
	[ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF] = "check-bouncebuf",
	[ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF] = "await-bouncebuf",
	[ACCEL_SEQUENCE_STATE_PULL_DATA] = "pull-data",
	[ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA] = "await-pull-data",
	[ACCEL_SEQUENCE_STATE_EXEC_TASK] = "exec-task",
	[ACCEL_SEQUENCE_STATE_AWAIT_TASK] = "await-task",
	[ACCEL_SEQUENCE_STATE_COMPLETE_TASK] = "complete-task",
	[ACCEL_SEQUENCE_STATE_NEXT_TASK] = "next-task",
	[ACCEL_SEQUENCE_STATE_PUSH_DATA] = "push-data",
	[ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA] = "await-push-data",
	[ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS] = "driver-exec-tasks",
	[ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS] = "driver-await-tasks",
	[ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS] = "driver-complete-tasks",
	[ACCEL_SEQUENCE_STATE_ERROR] = "error",
	[ACCEL_SEQUENCE_STATE_MAX] = "",
};

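/*
 * Happy path through these states for a single task (driven by
 * accel_process_sequence()): INIT -> CHECK_VIRTBUF -> CHECK_BOUNCEBUF ->
 * [PULL_DATA -> AWAIT_PULL_DATA] -> EXEC_TASK -> AWAIT_TASK ->
 * COMPLETE_TASK -> [PUSH_DATA -> AWAIT_PUSH_DATA] -> NEXT_TASK, which loops
 * back to INIT until no tasks remain.  Any failure moves the sequence to
 * ERROR, and the DRIVER_* states are used instead when a platform driver is
 * registered.
 */
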
#define ACCEL_SEQUENCE_STATE_STRING(s) \
	(((s) >= ACCEL_SEQUENCE_STATE_INIT && (s) < ACCEL_SEQUENCE_STATE_MAX) \
	 ? g_seq_states[s] : "unknown")

struct accel_buffer {
	struct spdk_accel_sequence	*seq;
	void				*buf;
	uint64_t			len;
	struct spdk_iobuf_entry		iobuf;
	spdk_accel_sequence_get_buf_cb	cb_fn;
	void				*cb_ctx;
	TAILQ_ENTRY(accel_buffer)	link;
};

struct accel_io_channel {
	struct spdk_io_channel			*module_ch[SPDK_ACCEL_OPC_LAST];
	struct spdk_io_channel			*driver_channel;
	void					*task_pool_base;
	struct spdk_accel_sequence		*seq_pool_base;
	struct accel_buffer			*buf_pool_base;
	TAILQ_HEAD(, spdk_accel_task)		task_pool;
	TAILQ_HEAD(, spdk_accel_sequence)	seq_pool;
	TAILQ_HEAD(, accel_buffer)		buf_pool;
	struct spdk_iobuf_channel		iobuf;
	struct accel_stats			stats;
};

TAILQ_HEAD(accel_sequence_tasks, spdk_accel_task);

struct spdk_accel_sequence {
	struct accel_io_channel			*ch;
	struct accel_sequence_tasks		tasks;
	struct accel_sequence_tasks		completed;
	TAILQ_HEAD(, accel_buffer)		bounce_bufs;
	int					status;
	/* state uses enum accel_sequence_state */
	uint8_t					state;
	bool					in_process_sequence;
	spdk_accel_completion_cb		cb_fn;
	void					*cb_arg;
	TAILQ_ENTRY(spdk_accel_sequence)	link;
};

#define accel_update_stats(ch, event, v) \
	do { \
		(ch)->stats.event += (v); \
	} while (0)

#define accel_update_task_stats(ch, task, event, v) \
	accel_update_stats(ch, operations[(task)->op_code].event, v)

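/*
 * For example, accel_update_task_stats(ch, task, executed, 1) expands to
 * (ch)->stats.operations[(task)->op_code].executed += (1).
 */
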
static inline void
accel_sequence_set_state(struct spdk_accel_sequence *seq, enum accel_sequence_state state)
{
	SPDK_DEBUGLOG(accel, "seq=%p, setting state: %s -> %s\n", seq,
		      ACCEL_SEQUENCE_STATE_STRING(seq->state), ACCEL_SEQUENCE_STATE_STRING(state));
	assert(seq->state != ACCEL_SEQUENCE_STATE_ERROR || state == ACCEL_SEQUENCE_STATE_ERROR);
	seq->state = state;
}

static void
accel_sequence_set_fail(struct spdk_accel_sequence *seq, int status)
{
	accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
	assert(status != 0);
	seq->status = status;
}

int
spdk_accel_get_opc_module_name(enum spdk_accel_opcode opcode, const char **module_name)
{
	if (opcode >= SPDK_ACCEL_OPC_LAST) {
		/* invalid opcode */
		return -EINVAL;
	}

	if (g_modules_opc[opcode].module) {
		*module_name = g_modules_opc[opcode].module->name;
	} else {
		return -ENOENT;
	}

	return 0;
}

void
_accel_for_each_module(struct module_info *info, _accel_for_each_module_fn fn)
{
	struct spdk_accel_module_if *accel_module;
	enum spdk_accel_opcode opcode;
	int j = 0;

	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		for (opcode = 0; opcode < SPDK_ACCEL_OPC_LAST; opcode++) {
			if (accel_module->supports_opcode(opcode)) {
				info->ops[j] = opcode;
				j++;
			}
		}
		info->name = accel_module->name;
		info->num_ops = j;
		fn(info);
		j = 0;
	}
}

const char *
spdk_accel_get_opcode_name(enum spdk_accel_opcode opcode)
{
	if (opcode < SPDK_ACCEL_OPC_LAST) {
		return g_opcode_strings[opcode];
	}

	return NULL;
}

int
spdk_accel_assign_opc(enum spdk_accel_opcode opcode, const char *name)
{
	char *copy;

	if (g_modules_started == true) {
		/* we don't allow re-assignment once things have started */
		return -EINVAL;
	}

	if (opcode >= SPDK_ACCEL_OPC_LAST) {
		/* invalid opcode */
		return -EINVAL;
	}

	copy = strdup(name);
	if (copy == NULL) {
		return -ENOMEM;
	}

	/* module selection will be validated after the framework starts. */
	g_modules_opc_override[opcode] = copy;

	return 0;
}

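/*
 * Usage sketch (compiled out, hypothetical example_* names): pinning an
 * opcode to a specific module by name before the framework starts.
 * "software" is assumed here to be the name of the built-in module.
 */
#if 0
static void
example_assign_copy_opcode(void)
{
	/* Fails with -EINVAL once the modules have been started */
	int rc = spdk_accel_assign_opc(SPDK_ACCEL_OPC_COPY, "software");

	assert(rc == 0);
}
#endif
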
void
spdk_accel_task_complete(struct spdk_accel_task *accel_task, int status)
{
	struct accel_io_channel *accel_ch = accel_task->accel_ch;
	spdk_accel_completion_cb	cb_fn = accel_task->cb_fn;
	void				*cb_arg = accel_task->cb_arg;

	/* Put the accel_task back into the pool first, so that the task pool
	 * isn't exhausted if the user's callback (cb_fn) recursively allocates
	 * another accel_task.
	 */
	TAILQ_INSERT_HEAD(&accel_ch->task_pool, accel_task, link);
	accel_task->seq = NULL;

	accel_update_task_stats(accel_ch, accel_task, executed, 1);
	accel_update_task_stats(accel_ch, accel_task, num_bytes, accel_task->nbytes);
	if (spdk_unlikely(status != 0)) {
		accel_update_task_stats(accel_ch, accel_task, failed, 1);
	}

	cb_fn(cb_arg, status);
}

inline static struct spdk_accel_task *
_get_task(struct accel_io_channel *accel_ch, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct spdk_accel_task *accel_task;

	accel_task = TAILQ_FIRST(&accel_ch->task_pool);
	if (spdk_unlikely(accel_task == NULL)) {
		accel_update_stats(accel_ch, retry.task, 1);
		return NULL;
	}

	TAILQ_REMOVE(&accel_ch->task_pool, accel_task, link);
	accel_task->link.tqe_next = NULL;
	accel_task->link.tqe_prev = NULL;

	accel_task->cb_fn = cb_fn;
	accel_task->cb_arg = cb_arg;
	accel_task->accel_ch = accel_ch;
	accel_task->s.iovs = NULL;
	accel_task->d.iovs = NULL;

	return accel_task;
}

static inline int
accel_submit_task(struct accel_io_channel *accel_ch, struct spdk_accel_task *task)
{
	struct spdk_io_channel *module_ch = accel_ch->module_ch[task->op_code];
	struct spdk_accel_module_if *module = g_modules_opc[task->op_code].module;
	int rc;

	rc = module->submit_tasks(module_ch, task);
	if (spdk_unlikely(rc != 0)) {
		accel_update_task_stats(accel_ch, task, failed, 1);
	}

	return rc;
}

static inline uint64_t
accel_get_iovlen(struct iovec *iovs, uint32_t iovcnt)
{
	uint64_t result = 0;
	uint32_t i;

	for (i = 0; i < iovcnt; ++i) {
		result += iovs[i].iov_len;
	}

	return result;
}

/* Accel framework public API for copy function */
int
spdk_accel_submit_copy(struct spdk_io_channel *ch, void *dst, void *src,
		       uint64_t nbytes, int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->op_code = SPDK_ACCEL_OPC_COPY;
	accel_task->flags = flags;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

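/*
 * Usage sketch (compiled out, hypothetical example_* names): submitting a
 * copy on a channel obtained from spdk_accel_get_io_channel().  A -ENOMEM
 * return means the per-channel task pool is empty and the caller should
 * retry later.
 */
#if 0
static void
example_copy_done(void *cb_arg, int status)
{
	SPDK_NOTICELOG("copy completed with status %d\n", status);
}

static int
example_submit_copy(struct spdk_io_channel *ch, void *dst, void *src, uint64_t len)
{
	return spdk_accel_submit_copy(ch, dst, src, len, 0, example_copy_done, NULL);
}
#endif
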
/* Accel framework public API for dual cast copy function */
int
spdk_accel_submit_dualcast(struct spdk_io_channel *ch, void *dst1,
			   void *dst2, void *src, uint64_t nbytes, int flags,
			   spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	if ((uintptr_t)dst1 & (ALIGN_4K - 1) || (uintptr_t)dst2 & (ALIGN_4K - 1)) {
		SPDK_ERRLOG("Dualcast requires 4K alignment on dst addresses\n");
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d2.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST2];
	accel_task->d.iovs[0].iov_base = dst1;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->d2.iovs[0].iov_base = dst2;
	accel_task->d2.iovs[0].iov_len = nbytes;
	accel_task->d2.iovcnt = 1;
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->flags = flags;
	accel_task->op_code = SPDK_ACCEL_OPC_DUALCAST;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for compare function */
int
spdk_accel_submit_compare(struct spdk_io_channel *ch, void *src1,
			  void *src2, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
			  void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->s2.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC2];
	accel_task->s.iovs[0].iov_base = src1;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->s2.iovs[0].iov_base = src2;
	accel_task->s2.iovs[0].iov_len = nbytes;
	accel_task->s2.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->op_code = SPDK_ACCEL_OPC_COMPARE;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for fill function */
int
spdk_accel_submit_fill(struct spdk_io_channel *ch, void *dst,
		       uint8_t fill, uint64_t nbytes, int flags,
		       spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->nbytes = nbytes;
	memset(&accel_task->fill_pattern, fill, sizeof(uint64_t));
	accel_task->flags = flags;
	accel_task->op_code = SPDK_ACCEL_OPC_FILL;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for CRC-32C function */
int
spdk_accel_submit_crc32c(struct spdk_io_channel *ch, uint32_t *crc_dst,
			 void *src, uint32_t seed, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
			 void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->op_code = SPDK_ACCEL_OPC_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for chained CRC-32C function */
int
spdk_accel_submit_crc32cv(struct spdk_io_channel *ch, uint32_t *crc_dst,
			  struct iovec *iov, uint32_t iov_cnt, uint32_t seed,
			  spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	if (iov == NULL) {
		SPDK_ERRLOG("iov should not be NULL\n");
		return -EINVAL;
	}

	if (!iov_cnt) {
		SPDK_ERRLOG("iovcnt should not be zero value\n");
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		SPDK_ERRLOG("no memory\n");
		assert(0);
		return -ENOMEM;
	}

	accel_task->s.iovs = iov;
	accel_task->s.iovcnt = iov_cnt;
	accel_task->nbytes = accel_get_iovlen(iov, iov_cnt);
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->op_code = SPDK_ACCEL_OPC_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

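/*
 * Usage sketch (compiled out, hypothetical example_* names): computing a
 * CRC-32C over scattered buffers.  The total length is derived from the
 * iovec array and *crc is written when the callback fires.
 */
#if 0
static int
example_submit_crc32cv(struct spdk_io_channel *ch, uint32_t *crc,
		       spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	static char part1[512], part2[512];
	static struct iovec iovs[2] = {
		{ .iov_base = part1, .iov_len = sizeof(part1) },
		{ .iov_base = part2, .iov_len = sizeof(part2) },
	};

	/* Seed of 0 starts a fresh CRC computation */
	return spdk_accel_submit_crc32cv(ch, crc, iovs, 2, 0, cb_fn, cb_arg);
}
#endif
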
/* Accel framework public API for copy with CRC-32C function */
int
spdk_accel_submit_copy_crc32c(struct spdk_io_channel *ch, void *dst,
			      void *src, uint32_t *crc_dst, uint32_t seed, uint64_t nbytes,
			      int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->flags = flags;
	accel_task->op_code = SPDK_ACCEL_OPC_COPY_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for chained copy + CRC-32C function */
int
spdk_accel_submit_copy_crc32cv(struct spdk_io_channel *ch, void *dst,
			       struct iovec *src_iovs, uint32_t iov_cnt, uint32_t *crc_dst,
			       uint32_t seed, int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	uint64_t nbytes;

	if (src_iovs == NULL) {
		SPDK_ERRLOG("iov should not be NULL\n");
		return -EINVAL;
	}

	if (!iov_cnt) {
		SPDK_ERRLOG("iovcnt should not be zero value\n");
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		SPDK_ERRLOG("no memory\n");
		assert(0);
		return -ENOMEM;
	}

	nbytes = accel_get_iovlen(src_iovs, iov_cnt);
	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = iov_cnt;
	accel_task->nbytes = nbytes;
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->flags = flags;
	accel_task->op_code = SPDK_ACCEL_OPC_COPY_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_compress(struct spdk_io_channel *ch, void *dst, uint64_t nbytes,
			   struct iovec *src_iovs, size_t src_iovcnt, uint32_t *output_size, int flags,
			   spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->output_size = output_size;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->nbytes = nbytes;
	accel_task->flags = flags;
	accel_task->op_code = SPDK_ACCEL_OPC_COMPRESS;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_decompress(struct spdk_io_channel *ch, struct iovec *dst_iovs,
			     size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
			     uint32_t *output_size, int flags, spdk_accel_completion_cb cb_fn,
			     void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->output_size = output_size;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	accel_task->flags = flags;
	accel_task->op_code = SPDK_ACCEL_OPC_DECOMPRESS;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_encrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  uint64_t iv, uint32_t block_size, int flags,
			  spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->crypto_key = key;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	accel_task->iv = iv;
	accel_task->block_size = block_size;
	accel_task->flags = flags;
	accel_task->op_code = SPDK_ACCEL_OPC_ENCRYPT;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_decrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  uint64_t iv, uint32_t block_size, int flags,
			  spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->crypto_key = key;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	accel_task->iv = iv;
	accel_task->block_size = block_size;
	accel_task->flags = flags;
	accel_task->op_code = SPDK_ACCEL_OPC_DECRYPT;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_xor(struct spdk_io_channel *ch, void *dst, void **sources, uint32_t nsrcs,
		      uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->nsrcs.srcs = sources;
	accel_task->nsrcs.cnt = nsrcs;
	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->op_code = SPDK_ACCEL_OPC_XOR;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

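/*
 * Usage sketch (compiled out, hypothetical example_* names): XORing two
 * source buffers into dst, e.g. to compute parity.  The sources array must
 * remain valid until the completion callback runs.
 */
#if 0
static int
example_submit_xor(struct spdk_io_channel *ch, void *dst, void **sources,
		   uint64_t len, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	/* sources[] holds two buffers of len bytes each */
	return spdk_accel_submit_xor(ch, dst, sources, 2, len, cb_fn, cb_arg);
}
#endif
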
static inline struct accel_buffer *
accel_get_buf(struct accel_io_channel *ch, uint64_t len)
{
	struct accel_buffer *buf;

	buf = TAILQ_FIRST(&ch->buf_pool);
	if (spdk_unlikely(buf == NULL)) {
		accel_update_stats(ch, retry.bufdesc, 1);
		return NULL;
	}

	TAILQ_REMOVE(&ch->buf_pool, buf, link);
	buf->len = len;
	buf->buf = NULL;
	buf->seq = NULL;
	buf->cb_fn = NULL;

	return buf;
}

static inline void
accel_put_buf(struct accel_io_channel *ch, struct accel_buffer *buf)
{
	if (buf->buf != NULL) {
		spdk_iobuf_put(&ch->iobuf, buf->buf, buf->len);
	}

	TAILQ_INSERT_HEAD(&ch->buf_pool, buf, link);
}

static inline struct spdk_accel_sequence *
accel_sequence_get(struct accel_io_channel *ch)
{
	struct spdk_accel_sequence *seq;

	seq = TAILQ_FIRST(&ch->seq_pool);
	if (spdk_unlikely(seq == NULL)) {
		accel_update_stats(ch, retry.sequence, 1);
		return NULL;
	}

	TAILQ_REMOVE(&ch->seq_pool, seq, link);

	TAILQ_INIT(&seq->tasks);
	TAILQ_INIT(&seq->completed);
	TAILQ_INIT(&seq->bounce_bufs);

	seq->ch = ch;
	seq->status = 0;
	seq->state = ACCEL_SEQUENCE_STATE_INIT;
	seq->in_process_sequence = false;

	return seq;
}

static inline void
accel_sequence_put(struct spdk_accel_sequence *seq)
{
	struct accel_io_channel *ch = seq->ch;
	struct accel_buffer *buf;

	while (!TAILQ_EMPTY(&seq->bounce_bufs)) {
		buf = TAILQ_FIRST(&seq->bounce_bufs);
		TAILQ_REMOVE(&seq->bounce_bufs, buf, link);
		accel_put_buf(seq->ch, buf);
	}

	assert(TAILQ_EMPTY(&seq->tasks));
	assert(TAILQ_EMPTY(&seq->completed));
	seq->ch = NULL;

	TAILQ_INSERT_HEAD(&ch->seq_pool, seq, link);
}

static void accel_sequence_task_cb(void *cb_arg, int status);

static inline struct spdk_accel_task *
accel_sequence_get_task(struct accel_io_channel *ch, struct spdk_accel_sequence *seq,
			spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct spdk_accel_task *task;

	task = _get_task(ch, accel_sequence_task_cb, seq);
	if (spdk_unlikely(task == NULL)) {
		return task;
	}

	task->step_cb_fn = cb_fn;
	task->step_cb_arg = cb_arg;
	task->seq = seq;

	return task;
}

int
spdk_accel_append_copy(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
		       struct iovec *dst_iovs, uint32_t dst_iovcnt,
		       struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
		       struct iovec *src_iovs, uint32_t src_iovcnt,
		       struct spdk_memory_domain *src_domain, void *src_domain_ctx,
		       int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	task->flags = flags;
	task->op_code = SPDK_ACCEL_OPC_COPY;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

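/*
 * Usage sketch (compiled out, hypothetical example_* names): chaining a fill
 * and a copy into one sequence, then executing it.  Nothing runs until
 * spdk_accel_sequence_finish() (declared in spdk/accel.h) is called; each
 * append merely queues a task on the sequence.
 */
#if 0
static int
example_build_sequence(struct spdk_io_channel *ch, struct iovec *dst_iov,
		       struct iovec *src_iov, void *tmp, uint64_t len,
		       spdk_accel_completion_cb seq_done, void *cb_arg)
{
	struct spdk_accel_sequence *seq = NULL;
	int rc;

	rc = spdk_accel_append_fill(&seq, ch, tmp, len, NULL, NULL, 0xa5, 0, NULL, NULL);
	if (rc != 0) {
		return rc;
	}

	rc = spdk_accel_append_copy(&seq, ch, dst_iov, 1, NULL, NULL,
				    src_iov, 1, NULL, NULL, 0, NULL, NULL);
	if (rc != 0) {
		spdk_accel_sequence_abort(seq);
		return rc;
	}

	return spdk_accel_sequence_finish(seq, seq_done, cb_arg);
}
#endif
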
int
spdk_accel_append_fill(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
		       void *buf, uint64_t len,
		       struct spdk_memory_domain *domain, void *domain_ctx, uint8_t pattern,
		       int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	memset(&task->fill_pattern, pattern, sizeof(uint64_t));

	task->d.iovs = &task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	task->d.iovs[0].iov_base = buf;
	task->d.iovs[0].iov_len = len;
	task->d.iovcnt = 1;
	task->nbytes = len;
	task->src_domain = NULL;
	task->dst_domain = domain;
	task->dst_domain_ctx = domain_ctx;
	task->flags = flags;
	task->op_code = SPDK_ACCEL_OPC_FILL;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_append_decompress(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
			     struct iovec *dst_iovs, size_t dst_iovcnt,
			     struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			     struct iovec *src_iovs, size_t src_iovcnt,
			     struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			     int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	/* TODO: support output_size for chaining */
	task->output_size = NULL;
	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	task->flags = flags;
	task->op_code = SPDK_ACCEL_OPC_DECOMPRESS;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_append_encrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
			  struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			  uint64_t iv, uint32_t block_size, int flags,
			  spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	assert(dst_iovs && dst_iovcnt && src_iovs && src_iovcnt && key && block_size);

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	task->crypto_key = key;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	task->iv = iv;
	task->block_size = block_size;
	task->flags = flags;
	task->op_code = SPDK_ACCEL_OPC_ENCRYPT;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_append_decrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
			  struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			  uint64_t iv, uint32_t block_size, int flags,
			  spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	assert(dst_iovs && dst_iovcnt && src_iovs && src_iovcnt && key && block_size);

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	task->crypto_key = key;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	task->iv = iv;
	task->block_size = block_size;
	task->flags = flags;
	task->op_code = SPDK_ACCEL_OPC_DECRYPT;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_append_crc32c(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
			 uint32_t *dst, struct iovec *iovs, uint32_t iovcnt,
			 struct spdk_memory_domain *domain, void *domain_ctx,
			 uint32_t seed, spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	task->s.iovs = iovs;
	task->s.iovcnt = iovcnt;
	task->src_domain = domain;
	task->src_domain_ctx = domain_ctx;
	task->nbytes = accel_get_iovlen(iovs, iovcnt);
	task->crc_dst = dst;
	task->seed = seed;
	task->op_code = SPDK_ACCEL_OPC_CRC32C;
	task->dst_domain = NULL;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_get_buf(struct spdk_io_channel *ch, uint64_t len, void **buf,
		   struct spdk_memory_domain **domain, void **domain_ctx)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct accel_buffer *accel_buf;

	accel_buf = accel_get_buf(accel_ch, len);
	if (spdk_unlikely(accel_buf == NULL)) {
		return -ENOMEM;
	}

	/* We always return the same pointer and identify the buffers through domain_ctx */
	*buf = ACCEL_BUFFER_BASE;
	*domain_ctx = accel_buf;
	*domain = g_accel_domain;

	return 0;
}

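/*
 * Usage sketch (compiled out, hypothetical example_* names): obtaining an
 * accel-domain scratch buffer to pass between steps of a sequence.  The
 * returned pointer is always ACCEL_BUFFER_BASE; real memory is only taken
 * from the iobuf pool once the sequence needs it, and the buffer is
 * identified through domain/domain_ctx.
 */
#if 0
static int
example_scratch_buf(struct spdk_io_channel *ch, uint64_t len)
{
	struct spdk_memory_domain *domain;
	void *buf, *domain_ctx;
	int rc;

	rc = spdk_accel_get_buf(ch, len, &buf, &domain, &domain_ctx);
	if (rc != 0) {
		return rc;
	}

	/* ... use buf/domain/domain_ctx in spdk_accel_append_*() calls ... */

	spdk_accel_put_buf(ch, buf, domain, domain_ctx);
	return 0;
}
#endif
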
void
spdk_accel_put_buf(struct spdk_io_channel *ch, void *buf,
		   struct spdk_memory_domain *domain, void *domain_ctx)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct accel_buffer *accel_buf = domain_ctx;

	assert(domain == g_accel_domain);
	assert(buf == ACCEL_BUFFER_BASE);

	accel_put_buf(accel_ch, accel_buf);
}

static void
accel_sequence_complete_tasks(struct spdk_accel_sequence *seq)
{
	struct spdk_accel_task *task;
	struct accel_io_channel *ch = seq->ch;
	spdk_accel_step_cb cb_fn;
	void *cb_arg;

	while (!TAILQ_EMPTY(&seq->completed)) {
		task = TAILQ_FIRST(&seq->completed);
		TAILQ_REMOVE(&seq->completed, task, seq_link);
		cb_fn = task->step_cb_fn;
		cb_arg = task->step_cb_arg;
		TAILQ_INSERT_HEAD(&ch->task_pool, task, link);
		if (cb_fn != NULL) {
			cb_fn(cb_arg);
		}
	}

	while (!TAILQ_EMPTY(&seq->tasks)) {
		task = TAILQ_FIRST(&seq->tasks);
		TAILQ_REMOVE(&seq->tasks, task, seq_link);
		cb_fn = task->step_cb_fn;
		cb_arg = task->step_cb_arg;
		TAILQ_INSERT_HEAD(&ch->task_pool, task, link);
		if (cb_fn != NULL) {
			cb_fn(cb_arg);
		}
	}
}

static void
accel_sequence_complete(struct spdk_accel_sequence *seq)
{
	SPDK_DEBUGLOG(accel, "Completed sequence: %p with status: %d\n", seq, seq->status);

	accel_update_stats(seq->ch, sequence_executed, 1);
	if (spdk_unlikely(seq->status != 0)) {
		accel_update_stats(seq->ch, sequence_failed, 1);
	}

	/* First notify all users that appended operations to this sequence */
	accel_sequence_complete_tasks(seq);

	/* Then notify the user that finished the sequence */
	seq->cb_fn(seq->cb_arg, seq->status);

	accel_sequence_put(seq);
}

static void
accel_update_virt_iov(struct iovec *diov, struct iovec *siov, struct accel_buffer *accel_buf)
{
	uintptr_t offset;

	offset = (uintptr_t)siov->iov_base & ACCEL_BUFFER_OFFSET_MASK;
	assert(offset < accel_buf->len);

	diov->iov_base = (char *)accel_buf->buf + offset;
	diov->iov_len = siov->iov_len;
}

static void
accel_sequence_set_virtbuf(struct spdk_accel_sequence *seq, struct accel_buffer *buf)
{
	struct spdk_accel_task *task;
	struct iovec *iov;

	/* Now that we've allocated the actual data buffer for this accel_buffer, update all tasks
	 * in a sequence that were using it.
	 */
	TAILQ_FOREACH(task, &seq->tasks, seq_link) {
		if (task->src_domain == g_accel_domain && task->src_domain_ctx == buf) {
			iov = &task->aux_iovs[SPDK_ACCEL_AXU_IOV_VIRT_SRC];
			assert(task->s.iovcnt == 1);
			accel_update_virt_iov(iov, &task->s.iovs[0], buf);
			task->src_domain = NULL;
			task->s.iovs = iov;
		}
		if (task->dst_domain == g_accel_domain && task->dst_domain_ctx == buf) {
			iov = &task->aux_iovs[SPDK_ACCEL_AXU_IOV_VIRT_DST];
			assert(task->d.iovcnt == 1);
			accel_update_virt_iov(iov, &task->d.iovs[0], buf);
			task->dst_domain = NULL;
			task->d.iovs = iov;
		}
	}
}

static void accel_process_sequence(struct spdk_accel_sequence *seq);

static void
accel_iobuf_get_virtbuf_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);

	assert(accel_buf->seq != NULL);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
	accel_process_sequence(accel_buf->seq);
}

static bool
accel_sequence_alloc_buf(struct spdk_accel_sequence *seq, struct accel_buffer *buf,
			 spdk_iobuf_get_cb cb_fn)
{
	struct accel_io_channel *ch = seq->ch;

	assert(buf->buf == NULL);
	assert(buf->seq == NULL);

	buf->seq = seq;
	buf->buf = spdk_iobuf_get(&ch->iobuf, buf->len, &buf->iobuf, cb_fn);
	if (buf->buf == NULL) {
		accel_update_stats(ch, retry.iobuf, 1);
		return false;
	}

	return true;
}

static bool
accel_sequence_check_virtbuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	/* If a task doesn't have dst/src (e.g. fill, crc32), its dst/src domain should be set to
	 * NULL */
	if (task->src_domain == g_accel_domain) {
		if (!accel_sequence_alloc_buf(seq, task->src_domain_ctx,
					      accel_iobuf_get_virtbuf_cb)) {
			return false;
		}

		accel_sequence_set_virtbuf(seq, task->src_domain_ctx);
	}

	if (task->dst_domain == g_accel_domain) {
		if (!accel_sequence_alloc_buf(seq, task->dst_domain_ctx,
					      accel_iobuf_get_virtbuf_cb)) {
			return false;
		}

		accel_sequence_set_virtbuf(seq, task->dst_domain_ctx);
	}

	return true;
}

static void
accel_sequence_get_buf_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);

	assert(accel_buf->seq != NULL);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
	accel_buf->cb_fn(accel_buf->seq, accel_buf->cb_ctx);
}

bool
spdk_accel_alloc_sequence_buf(struct spdk_accel_sequence *seq, void *buf,
			      struct spdk_memory_domain *domain, void *domain_ctx,
			      spdk_accel_sequence_get_buf_cb cb_fn, void *cb_ctx)
{
	struct accel_buffer *accel_buf = domain_ctx;

	assert(domain == g_accel_domain);
	accel_buf->cb_fn = cb_fn;
	accel_buf->cb_ctx = cb_ctx;

	if (!accel_sequence_alloc_buf(seq, accel_buf, accel_sequence_get_buf_cb)) {
		return false;
	}

	accel_sequence_set_virtbuf(seq, accel_buf);

	return true;
}

struct spdk_accel_task *
spdk_accel_sequence_first_task(struct spdk_accel_sequence *seq)
{
	return TAILQ_FIRST(&seq->tasks);
}

struct spdk_accel_task *
spdk_accel_sequence_next_task(struct spdk_accel_task *task)
{
	return TAILQ_NEXT(task, seq_link);
}

static inline void
accel_set_bounce_buffer(struct spdk_accel_bounce_buffer *bounce, struct iovec **iovs,
			uint32_t *iovcnt, struct spdk_memory_domain **domain, void **domain_ctx,
			struct accel_buffer *buf)
{
	bounce->orig_iovs = *iovs;
	bounce->orig_iovcnt = *iovcnt;
	bounce->orig_domain = *domain;
	bounce->orig_domain_ctx = *domain_ctx;
	bounce->iov.iov_base = buf->buf;
	bounce->iov.iov_len = buf->len;

	*iovs = &bounce->iov;
	*iovcnt = 1;
	*domain = NULL;
}

static void
accel_iobuf_get_src_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct spdk_accel_task *task;
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	task = TAILQ_FIRST(&accel_buf->seq->tasks);
	assert(task != NULL);

	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
	accel_set_bounce_buffer(&task->bounce.s, &task->s.iovs, &task->s.iovcnt, &task->src_domain,
				&task->src_domain_ctx, accel_buf);
	accel_process_sequence(accel_buf->seq);
}

static void
accel_iobuf_get_dst_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct spdk_accel_task *task;
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	task = TAILQ_FIRST(&accel_buf->seq->tasks);
	assert(task != NULL);

	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
	accel_set_bounce_buffer(&task->bounce.d, &task->d.iovs, &task->d.iovcnt, &task->dst_domain,
				&task->dst_domain_ctx, accel_buf);
	accel_process_sequence(accel_buf->seq);
}

static int
accel_sequence_check_bouncebuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	struct accel_buffer *buf;

	if (task->src_domain != NULL) {
		/* By the time we're here, accel buffers should have been allocated */
		assert(task->src_domain != g_accel_domain);

		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->s.iovs, task->s.iovcnt));
		if (buf == NULL) {
			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
			return -ENOMEM;
		}

		TAILQ_INSERT_TAIL(&seq->bounce_bufs, buf, link);
		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_src_bounce_cb)) {
			return -EAGAIN;
		}

		accel_set_bounce_buffer(&task->bounce.s, &task->s.iovs, &task->s.iovcnt,
					&task->src_domain, &task->src_domain_ctx, buf);
	}

	if (task->dst_domain != NULL) {
		/* By the time we're here, accel buffers should have been allocated */
		assert(task->dst_domain != g_accel_domain);

		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->d.iovs, task->d.iovcnt));
		if (buf == NULL) {
			/* The src buffer will be released when a sequence is completed */
			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
			return -ENOMEM;
		}

		TAILQ_INSERT_TAIL(&seq->bounce_bufs, buf, link);
		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_dst_bounce_cb)) {
			return -EAGAIN;
		}

		accel_set_bounce_buffer(&task->bounce.d, &task->d.iovs, &task->d.iovcnt,
					&task->dst_domain, &task->dst_domain_ctx, buf);
	}

	return 0;
}

static void
accel_task_pull_data_cb(void *ctx, int status)
{
	struct spdk_accel_sequence *seq = ctx;

	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
	if (spdk_likely(status == 0)) {
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
	} else {
		accel_sequence_set_fail(seq, status);
	}

	accel_process_sequence(seq);
}

static void
accel_task_pull_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	int rc;

	assert(task->bounce.s.orig_iovs != NULL);
	assert(task->bounce.s.orig_domain != NULL);
	assert(task->bounce.s.orig_domain != g_accel_domain);
	assert(!g_modules_opc[task->op_code].supports_memory_domains);

	rc = spdk_memory_domain_pull_data(task->bounce.s.orig_domain,
					  task->bounce.s.orig_domain_ctx,
					  task->bounce.s.orig_iovs, task->bounce.s.orig_iovcnt,
					  task->s.iovs, task->s.iovcnt,
					  accel_task_pull_data_cb, seq);
	if (spdk_unlikely(rc != 0)) {
		SPDK_ERRLOG("Failed to pull data from memory domain: %s, rc: %d\n",
			    spdk_memory_domain_get_dma_device_id(task->bounce.s.orig_domain), rc);
		accel_sequence_set_fail(seq, rc);
	}
}

static void
accel_task_push_data_cb(void *ctx, int status)
{
	struct spdk_accel_sequence *seq = ctx;

	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
	if (spdk_likely(status == 0)) {
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
	} else {
		accel_sequence_set_fail(seq, status);
	}

	accel_process_sequence(seq);
}

static void
accel_task_push_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	int rc;

	assert(task->bounce.d.orig_iovs != NULL);
	assert(task->bounce.d.orig_domain != NULL);
	assert(task->bounce.d.orig_domain != g_accel_domain);
	assert(!g_modules_opc[task->op_code].supports_memory_domains);

	rc = spdk_memory_domain_push_data(task->bounce.d.orig_domain,
					  task->bounce.d.orig_domain_ctx,
					  task->bounce.d.orig_iovs, task->bounce.d.orig_iovcnt,
					  task->d.iovs, task->d.iovcnt,
					  accel_task_push_data_cb, seq);
	if (spdk_unlikely(rc != 0)) {
		SPDK_ERRLOG("Failed to push data to memory domain: %s, rc: %d\n",
			    spdk_memory_domain_get_dma_device_id(task->bounce.d.orig_domain), rc);
1575 		accel_sequence_set_fail(seq, rc);
1576 	}
1577 }
1578 
1579 static void
1580 accel_process_sequence(struct spdk_accel_sequence *seq)
1581 {
1582 	struct accel_io_channel *accel_ch = seq->ch;
1583 	struct spdk_accel_task *task;
1584 	enum accel_sequence_state state;
1585 	int rc;
1586 
1587 	/* Prevent recursive calls to this function */
1588 	if (spdk_unlikely(seq->in_process_sequence)) {
1589 		return;
1590 	}
1591 	seq->in_process_sequence = true;
1592 
1593 	task = TAILQ_FIRST(&seq->tasks);
1594 	do {
1595 		state = seq->state;
1596 		switch (state) {
1597 		case ACCEL_SEQUENCE_STATE_INIT:
1598 			if (g_accel_driver != NULL) {
1599 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS);
1600 				break;
1601 			}
1602 		/* Fall through */
1603 		case ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF:
1604 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
1605 			if (!accel_sequence_check_virtbuf(seq, task)) {
1606 				/* We couldn't allocate a buffer, wait until one is available */
1607 				break;
1608 			}
1609 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
1610 		/* Fall through */
1611 		case ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF:
1612 			/* If a module supports memory domains, we don't need to allocate bounce
1613 			 * buffers */
1614 			if (g_modules_opc[task->op_code].supports_memory_domains) {
1615 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
1616 				break;
1617 			}
1618 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
1619 			rc = accel_sequence_check_bouncebuf(seq, task);
1620 			if (spdk_unlikely(rc != 0)) {
1621 				/* We couldn't allocate a buffer, wait until one is available */
1622 				if (rc == -EAGAIN) {
1623 					break;
1624 				}
1625 				accel_sequence_set_fail(seq, rc);
1626 				break;
1627 			}
1628 			if (task->s.iovs == &task->bounce.s.iov) {
1629 				assert(task->bounce.s.orig_iovs);
1630 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PULL_DATA);
1631 				break;
1632 			}
1633 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
1634 		/* Fall through */
1635 		case ACCEL_SEQUENCE_STATE_EXEC_TASK:
1636 			SPDK_DEBUGLOG(accel, "Executing %s operation, sequence: %p\n",
1637 				      g_opcode_strings[task->op_code], seq);
1638 
1639 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_TASK);
1640 			rc = accel_submit_task(accel_ch, task);
1641 			if (spdk_unlikely(rc != 0)) {
1642 				SPDK_ERRLOG("Failed to submit %s operation, sequence: %p\n",
1643 					    g_opcode_strings[task->op_code], seq);
1644 				accel_sequence_set_fail(seq, rc);
1645 			}
1646 			break;
1647 		case ACCEL_SEQUENCE_STATE_PULL_DATA:
1648 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
1649 			accel_task_pull_data(seq, task);
1650 			break;
1651 		case ACCEL_SEQUENCE_STATE_COMPLETE_TASK:
1652 			if (task->d.iovs == &task->bounce.d.iov) {
1653 				assert(task->bounce.d.orig_iovs);
1654 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PUSH_DATA);
1655 				break;
1656 			}
1657 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
1658 			break;
1659 		case ACCEL_SEQUENCE_STATE_PUSH_DATA:
1660 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
1661 			accel_task_push_data(seq, task);
1662 			break;
1663 		case ACCEL_SEQUENCE_STATE_NEXT_TASK:
1664 			TAILQ_REMOVE(&seq->tasks, task, seq_link);
1665 			TAILQ_INSERT_TAIL(&seq->completed, task, seq_link);
1666 			/* Check if there are any remaining tasks */
1667 			task = TAILQ_FIRST(&seq->tasks);
1668 			if (task == NULL) {
1669 				/* Immediately return here to make sure we don't touch the sequence
1670 				 * after it's completed */
1671 				accel_sequence_complete(seq);
1672 				return;
1673 			}
1674 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_INIT);
1675 			break;
1676 		case ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS:
1677 			assert(!TAILQ_EMPTY(&seq->tasks));
1678 
1679 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS);
1680 			rc = g_accel_driver->execute_sequence(accel_ch->driver_channel, seq);
1681 			if (spdk_unlikely(rc != 0)) {
1682 				SPDK_ERRLOG("Failed to execute sequence: %p using driver: %s\n",
1683 					    seq, g_accel_driver->name);
1684 				accel_sequence_set_fail(seq, rc);
1685 			}
1686 			break;
1687 		case ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS:
1688 			/* Get the task again, as the driver might have completed some tasks
1689 			 * synchronously */
1690 			task = TAILQ_FIRST(&seq->tasks);
1691 			if (task == NULL) {
1692 				/* Immediately return here to make sure we don't touch the sequence
1693 				 * after it's completed */
1694 				accel_sequence_complete(seq);
1695 				return;
1696 			}
1697 			/* We don't want to execute the next task through the driver, so we
1698 			 * explicitly omit the INIT state here */
1699 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
1700 			break;
1701 		case ACCEL_SEQUENCE_STATE_ERROR:
1702 			/* Immediately return here to make sure we don't touch the sequence
1703 			 * after it's completed */
1704 			assert(seq->status != 0);
1705 			accel_sequence_complete(seq);
1706 			return;
1707 		case ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF:
1708 		case ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF:
1709 		case ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA:
1710 		case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
1711 		case ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA:
1712 		case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS:
1713 			break;
1714 		default:
1715 			assert(0 && "bad state");
1716 			break;
1717 		}
1718 	} while (seq->state != state);
1719 
1720 	seq->in_process_sequence = false;
1721 }
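
/*
 * To summarize the state machine above: each task normally walks CHECK_VIRTBUF ->
 * CHECK_BOUNCEBUF -> (optionally PULL_DATA ->) EXEC_TASK -> COMPLETE_TASK ->
 * (optionally PUSH_DATA ->) NEXT_TASK, while the AWAIT_* states park the sequence
 * until a buffer becomes available or an asynchronous operation completes.  When a
 * driver is configured, the DRIVER_* states let it execute the tasks it claims.
 */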
1722 
1723 static void
1724 accel_sequence_task_cb(void *cb_arg, int status)
1725 {
1726 	struct spdk_accel_sequence *seq = cb_arg;
1727 	struct spdk_accel_task *task = TAILQ_FIRST(&seq->tasks);
1728 	struct accel_io_channel *accel_ch = seq->ch;
1729 
1730 	/* spdk_accel_task_complete() puts the task back into the task pool, but we don't want to do
1731 	 * that if a task is part of a sequence.  Removing the task from that pool here is the
1732 	 * easiest way to prevent this, even though it is a bit hacky.
1733 	 */
1734 	assert(task != NULL);
1735 	TAILQ_REMOVE(&accel_ch->task_pool, task, link);
1736 
1737 	switch (seq->state) {
1738 	case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
1739 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_COMPLETE_TASK);
1740 		if (spdk_unlikely(status != 0)) {
1741 			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p\n",
1742 				    g_opcode_strings[task->op_code], seq);
1743 			accel_sequence_set_fail(seq, status);
1744 		}
1745 
1746 		accel_process_sequence(seq);
1747 		break;
1748 	case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS:
1749 		assert(g_accel_driver != NULL);
1750 		/* Immediately remove the task from the outstanding list to make sure the next call
1751 		 * to spdk_accel_sequence_first_task() doesn't return it */
1752 		TAILQ_REMOVE(&seq->tasks, task, seq_link);
1753 		TAILQ_INSERT_TAIL(&seq->completed, task, seq_link);
1754 
1755 		if (spdk_unlikely(status != 0)) {
1756 			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p through "
1757 				    "driver: %s\n", g_opcode_strings[task->op_code], seq,
1758 				    g_accel_driver->name);
1759 			/* Update status without using accel_sequence_set_fail() to avoid changing
1760 			 * seq's state to ERROR until driver calls spdk_accel_sequence_continue() */
1761 			seq->status = status;
1762 		}
1763 		break;
1764 	default:
1765 		assert(0 && "bad state");
1766 		break;
1767 	}
1768 }
1769 
1770 void
1771 spdk_accel_sequence_continue(struct spdk_accel_sequence *seq)
1772 {
1773 	assert(g_accel_driver != NULL);
1774 	assert(seq->state == ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS);
1775 
1776 	if (spdk_likely(seq->status == 0)) {
1777 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS);
1778 	} else {
1779 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
1780 	}
1781 
1782 	accel_process_sequence(seq);
1783 }
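
/*
 * An illustrative sketch (not part of the build) of how a driver is expected to use the
 * function above.  my_driver_execute_sequence() and the submission details are
 * hypothetical; spdk_accel_sequence_first_task() and spdk_accel_task_complete() are the
 * accel_module.h interfaces referenced elsewhere in this file:
 *
 *	static int
 *	my_driver_execute_sequence(struct spdk_io_channel *ch, struct spdk_accel_sequence *seq)
 *	{
 *		struct spdk_accel_task *task = spdk_accel_sequence_first_task(seq);
 *
 *		... submit the tasks this driver wants to handle ...
 *		return 0;
 *	}
 *
 * On completion, the driver finishes each claimed task with spdk_accel_task_complete()
 * and, once all of them are done, calls spdk_accel_sequence_continue(seq) to hand the
 * remainder of the sequence back to the framework.
 */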
1784 
1785 static bool
1786 accel_compare_iovs(struct iovec *iova, uint32_t iovacnt, struct iovec *iovb, uint32_t iovbcnt)
1787 {
1788 	/* For now, just do a simple check that the iovec arrays are exactly the same */
1789 	if (iovacnt != iovbcnt) {
1790 		return false;
1791 	}
1792 
1793 	return memcmp(iova, iovb, sizeof(*iova) * iovacnt) == 0;
1794 }
1795 
1796 static bool
1797 accel_task_set_dstbuf(struct spdk_accel_task *task, struct spdk_accel_task *next)
1798 {
1799 	struct spdk_accel_task *prev;
1800 
1801 	switch (task->op_code) {
1802 	case SPDK_ACCEL_OPC_DECOMPRESS:
1803 	case SPDK_ACCEL_OPC_FILL:
1804 	case SPDK_ACCEL_OPC_ENCRYPT:
1805 	case SPDK_ACCEL_OPC_DECRYPT:
1806 		if (task->dst_domain != next->src_domain) {
1807 			return false;
1808 		}
1809 		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
1810 					next->s.iovs, next->s.iovcnt)) {
1811 			return false;
1812 		}
1813 		task->d.iovs = next->d.iovs;
1814 		task->d.iovcnt = next->d.iovcnt;
1815 		task->dst_domain = next->dst_domain;
1816 		task->dst_domain_ctx = next->dst_domain_ctx;
1817 		break;
1818 	case SPDK_ACCEL_OPC_CRC32C:
1819 		/* crc32 is special, because it doesn't have a dst buffer */
1820 		if (task->src_domain != next->src_domain) {
1821 			return false;
1822 		}
1823 		if (!accel_compare_iovs(task->s.iovs, task->s.iovcnt,
1824 					next->s.iovs, next->s.iovcnt)) {
1825 			return false;
1826 		}
1827 		/* We can only change crc32's buffer if we can change the previous task's buffer */
1828 		prev = TAILQ_PREV(task, accel_sequence_tasks, seq_link);
1829 		if (prev == NULL) {
1830 			return false;
1831 		}
1832 		if (!accel_task_set_dstbuf(prev, next)) {
1833 			return false;
1834 		}
1835 		task->s.iovs = next->d.iovs;
1836 		task->s.iovcnt = next->d.iovcnt;
1837 		task->src_domain = next->dst_domain;
1838 		task->src_domain_ctx = next->dst_domain_ctx;
1839 		break;
1840 	default:
1841 		return false;
1842 	}
1843 
1844 	return true;
1845 }
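
/*
 * For example, given the pair "decompress (A -> B), copy (B -> C)", the function above
 * rewrites the decompress task to "A -> C" so that the copy can be elided, provided B
 * exactly matches the copy's source iovecs and the memory domains agree.  crc32c has no
 * destination, so its source is rewritten instead, and only if the preceding task's
 * destination can be rewritten the same way.
 */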
1846 
1847 static void
1848 accel_sequence_merge_tasks(struct spdk_accel_sequence *seq, struct spdk_accel_task *task,
1849 			   struct spdk_accel_task **next_task)
1850 {
1851 	struct spdk_accel_task *next = *next_task;
1852 
1853 	switch (task->op_code) {
1854 	case SPDK_ACCEL_OPC_COPY:
1855 		/* We only allow changing the src of operations that actually have a src, e.g. we never
1856 		 * do it for fill.  Theoretically, it is possible, but we'd have to be careful to
1857 		 * change the src of the operation after the fill (which in turn could also be a fill).
1858 		 * So, for the sake of simplicity, skip this type of operation for now.
1859 		 */
1860 		if (next->op_code != SPDK_ACCEL_OPC_DECOMPRESS &&
1861 		    next->op_code != SPDK_ACCEL_OPC_COPY &&
1862 		    next->op_code != SPDK_ACCEL_OPC_ENCRYPT &&
1863 		    next->op_code != SPDK_ACCEL_OPC_DECRYPT &&
1864 		    next->op_code != SPDK_ACCEL_OPC_CRC32C) {
1865 			break;
1866 		}
1867 		if (task->dst_domain != next->src_domain) {
1868 			break;
1869 		}
1870 		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
1871 					next->s.iovs, next->s.iovcnt)) {
1872 			break;
1873 		}
1874 		next->s.iovs = task->s.iovs;
1875 		next->s.iovcnt = task->s.iovcnt;
1876 		next->src_domain = task->src_domain;
1877 		next->src_domain_ctx = task->src_domain_ctx;
1878 		TAILQ_REMOVE(&seq->tasks, task, seq_link);
1879 		TAILQ_INSERT_TAIL(&seq->completed, task, seq_link);
1880 		break;
1881 	case SPDK_ACCEL_OPC_DECOMPRESS:
1882 	case SPDK_ACCEL_OPC_FILL:
1883 	case SPDK_ACCEL_OPC_ENCRYPT:
1884 	case SPDK_ACCEL_OPC_DECRYPT:
1885 	case SPDK_ACCEL_OPC_CRC32C:
1886 		/* We can only merge tasks when one of them is a copy */
1887 		if (next->op_code != SPDK_ACCEL_OPC_COPY) {
1888 			break;
1889 		}
1890 		if (!accel_task_set_dstbuf(task, next)) {
1891 			break;
1892 		}
1893 		/* We're removing next_task from the tasks queue, so we need to update its pointer
1894 		 * so that the TAILQ_FOREACH_SAFE() loop in spdk_accel_sequence_finish() works correctly */
1895 		*next_task = TAILQ_NEXT(next, seq_link);
1896 		TAILQ_REMOVE(&seq->tasks, next, seq_link);
1897 		TAILQ_INSERT_TAIL(&seq->completed, next, seq_link);
1898 		break;
1899 	default:
1900 		assert(0 && "bad opcode");
1901 		break;
1902 	}
1903 }
1904 
1905 void
1906 spdk_accel_sequence_finish(struct spdk_accel_sequence *seq,
1907 			   spdk_accel_completion_cb cb_fn, void *cb_arg)
1908 {
1909 	struct spdk_accel_task *task, *next;
1910 
1911 	/* Try to remove any copy operations if possible */
1912 	TAILQ_FOREACH_SAFE(task, &seq->tasks, seq_link, next) {
1913 		if (next == NULL) {
1914 			break;
1915 		}
1916 		accel_sequence_merge_tasks(seq, task, &next);
1917 	}
1918 
1919 	seq->cb_fn = cb_fn;
1920 	seq->cb_arg = cb_arg;
1921 
1922 	accel_process_sequence(seq);
1923 }
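
/*
 * A minimal usage sketch (illustrative only; the append calls' argument lists are
 * abbreviated with "..." and finish_cb/cb_arg are hypothetical, see spdk/accel.h for
 * the full signatures):
 *
 *	struct spdk_accel_sequence *seq = NULL;
 *	struct spdk_io_channel *ch = spdk_accel_get_io_channel();
 *
 *	spdk_accel_append_copy(&seq, ch, ...);
 *	spdk_accel_append_crc32c(&seq, ch, ...);
 *	spdk_accel_sequence_finish(seq, finish_cb, cb_arg);
 *
 * If the copy's buffers line up with a neighboring operation, the merge pass above may
 * drop the copy entirely.
 */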
1924 
1925 void
1926 spdk_accel_sequence_reverse(struct spdk_accel_sequence *seq)
1927 {
1928 	struct accel_sequence_tasks tasks = TAILQ_HEAD_INITIALIZER(tasks);
1929 	struct spdk_accel_task *task;
1930 
1931 	assert(TAILQ_EMPTY(&seq->completed));
1932 	TAILQ_SWAP(&tasks, &seq->tasks, spdk_accel_task, seq_link);
1933 
1934 	while (!TAILQ_EMPTY(&tasks)) {
1935 		task = TAILQ_FIRST(&tasks);
1936 		TAILQ_REMOVE(&tasks, task, seq_link);
1937 		TAILQ_INSERT_HEAD(&seq->tasks, task, seq_link);
1938 	}
1939 }
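
/*
 * Reversing a sequence is useful when it was built in one direction but needs to be
 * executed in the other, e.g. when the transformations appended for a write path are
 * replayed in the opposite order for the corresponding read path.
 */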
1940 
1941 void
1942 spdk_accel_sequence_abort(struct spdk_accel_sequence *seq)
1943 {
1944 	if (seq == NULL) {
1945 		return;
1946 	}
1947 
1948 	accel_sequence_complete_tasks(seq);
1949 	accel_sequence_put(seq);
1950 }
1951 
1952 struct spdk_memory_domain *
1953 spdk_accel_get_memory_domain(void)
1954 {
1955 	return g_accel_domain;
1956 }
1957 
1958 static struct spdk_accel_module_if *
1959 _module_find_by_name(const char *name)
1960 {
1961 	struct spdk_accel_module_if *accel_module = NULL;
1962 
1963 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
1964 		if (strcmp(name, accel_module->name) == 0) {
1965 			break;
1966 		}
1967 	}
1968 
1969 	return accel_module;
1970 }
1971 
1972 static inline struct spdk_accel_crypto_key *
1973 _accel_crypto_key_get(const char *name)
1974 {
1975 	struct spdk_accel_crypto_key *key;
1976 
1977 	assert(spdk_spin_held(&g_keyring_spin));
1978 
1979 	TAILQ_FOREACH(key, &g_keyring, link) {
1980 		if (strcmp(name, key->param.key_name) == 0) {
1981 			return key;
1982 		}
1983 	}
1984 
1985 	return NULL;
1986 }
1987 
1988 static void
1989 accel_crypto_key_free_mem(struct spdk_accel_crypto_key *key)
1990 {
1991 	if (key->param.hex_key) {
1992 		spdk_memset_s(key->param.hex_key, key->key_size * 2, 0, key->key_size * 2);
1993 		free(key->param.hex_key);
1994 	}
1995 	if (key->param.hex_key2) {
1996 		spdk_memset_s(key->param.hex_key2, key->key2_size * 2, 0, key->key2_size * 2);
1997 		free(key->param.hex_key2);
1998 	}
1999 	free(key->param.tweak_mode);
2000 	free(key->param.key_name);
2001 	free(key->param.cipher);
2002 	if (key->key) {
2003 		spdk_memset_s(key->key, key->key_size, 0, key->key_size);
2004 		free(key->key);
2005 	}
2006 	if (key->key2) {
2007 		spdk_memset_s(key->key2, key->key2_size, 0, key->key2_size);
2008 		free(key->key2);
2009 	}
2010 	free(key);
2011 }
2012 
2013 static void
2014 accel_crypto_key_destroy_unsafe(struct spdk_accel_crypto_key *key)
2015 {
2016 	assert(key->module_if);
2017 	assert(key->module_if->crypto_key_deinit);
2018 
2019 	key->module_if->crypto_key_deinit(key);
2020 	accel_crypto_key_free_mem(key);
2021 }
2022 
2023 /*
2024  * This function mitigates a timing side channel that could be caused by using strcmp().
2025  * Please refer to the chapter "Mitigating Information Leakage Based on Variable Timing" in
2026  * the article [1] for more details.
2027  * [1] https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/secure-coding/mitigate-timing-side-channel-crypto-implementation.html
2028  */
2029 static bool
2030 accel_aes_xts_keys_equal(const char *k1, size_t k1_len, const char *k2, size_t k2_len)
2031 {
2032 	size_t i;
2033 	volatile size_t x = k1_len ^ k2_len;
2034 
2035 	for (i = 0; ((i < k1_len) & (i < k2_len)); i++) {
2036 		x |= k1[i] ^ k2[i];
2037 	}
2038 
2039 	return x == 0;
2040 }
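
/*
 * Note that the loop above deliberately uses the bitwise '&' instead of '&&' to avoid a
 * short-circuit branch that depends on secret data, and accumulates all differences into
 * the volatile 'x', so every byte is inspected regardless of earlier mismatches.
 */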
2041 
2042 static const char *g_tweak_modes[] = {
2043 	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA] = "SIMPLE_LBA",
2044 	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_JOIN_NEG_LBA_WITH_LBA] = "JOIN_NEG_LBA_WITH_LBA",
2045 	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_INCR_512_FULL_LBA] = "INCR_512_FULL_LBA",
2046 	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_INCR_512_UPPER_LBA] = "INCR_512_UPPER_LBA",
2047 };
2048 
2049 static const char *g_ciphers[] = {
2050 	[SPDK_ACCEL_CIPHER_AES_CBC] = "AES_CBC",
2051 	[SPDK_ACCEL_CIPHER_AES_XTS] = "AES_XTS",
2052 };
2053 
2054 int
2055 spdk_accel_crypto_key_create(const struct spdk_accel_crypto_key_create_param *param)
2056 {
2057 	struct spdk_accel_module_if *module;
2058 	struct spdk_accel_crypto_key *key;
2059 	size_t hex_key_size, hex_key2_size;
2060 	bool found = false;
2061 	size_t i;
2062 	int rc;
2063 
2064 	if (!param || !param->hex_key || !param->cipher || !param->key_name) {
2065 		return -EINVAL;
2066 	}
2067 
2068 	if (g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module != g_modules_opc[SPDK_ACCEL_OPC_DECRYPT].module) {
2069 		/* This should be practically impossible, but check anyway and warn the user */
2070 		SPDK_ERRLOG("Different accel modules are used for encryption and decryption\n");
2071 	}
2072 	module = g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module;
2073 
2074 	if (!module) {
2075 		SPDK_ERRLOG("No accel module found assigned for crypto operation\n");
2076 		return -ENOENT;
2077 	}
2078 
2079 	if (!module->crypto_key_init || !module->crypto_supports_cipher) {
2080 		SPDK_ERRLOG("Module %s doesn't support crypto operations\n", module->name);
2081 		return -ENOTSUP;
2082 	}
2083 
2084 	key = calloc(1, sizeof(*key));
2085 	if (!key) {
2086 		return -ENOMEM;
2087 	}
2088 
2089 	key->param.key_name = strdup(param->key_name);
2090 	if (!key->param.key_name) {
2091 		rc = -ENOMEM;
2092 		goto error;
2093 	}
2094 
2095 	for (i = 0; i < SPDK_COUNTOF(g_ciphers); ++i) {
2096 		assert(g_ciphers[i]);
2097 
2098 		if (strncmp(param->cipher, g_ciphers[i], strlen(g_ciphers[i])) == 0) {
2099 			key->cipher = i;
2100 			found = true;
2101 			break;
2102 		}
2103 	}
2104 
2105 	if (!found) {
2106 		SPDK_ERRLOG("Failed to parse cipher\n");
2107 		rc = -EINVAL;
2108 		goto error;
2109 	}
2110 
2111 	key->param.cipher = strdup(param->cipher);
2112 	if (!key->param.cipher) {
2113 		rc = -ENOMEM;
2114 		goto error;
2115 	}
2116 
2117 	hex_key_size = strnlen(param->hex_key, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
2118 	if (hex_key_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
2119 		SPDK_ERRLOG("key1 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
2120 		rc = -EINVAL;
2121 		goto error;
2122 	}
2123 
2124 	if (hex_key_size == 0) {
2125 		SPDK_ERRLOG("key1 size cannot be 0\n");
2126 		rc = -EINVAL;
2127 		goto error;
2128 	}
2129 
2130 	key->param.hex_key = strdup(param->hex_key);
2131 	if (!key->param.hex_key) {
2132 		rc = -ENOMEM;
2133 		goto error;
2134 	}
2135 
2136 	key->key_size = hex_key_size / 2;
2137 	key->key = spdk_unhexlify(key->param.hex_key);
2138 	if (!key->key) {
2139 		SPDK_ERRLOG("Failed to unhexlify key1\n");
2140 		rc = -EINVAL;
2141 		goto error;
2142 	}
2143 
2144 	if (param->hex_key2) {
2145 		hex_key2_size = strnlen(param->hex_key2, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
2146 		if (hex_key2_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
2147 			SPDK_ERRLOG("key2 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
2148 			rc = -EINVAL;
2149 			goto error;
2150 		}
2151 
2152 		if (hex_key2_size == 0) {
2153 			SPDK_ERRLOG("key2 size cannot be 0\n");
2154 			rc = -EINVAL;
2155 			goto error;
2156 		}
2157 
2158 		key->param.hex_key2 = strdup(param->hex_key2);
2159 		if (!key->param.hex_key2) {
2160 			rc = -ENOMEM;
2161 			goto error;
2162 		}
2163 
2164 		key->key2_size = hex_key2_size / 2;
2165 		key->key2 = spdk_unhexlify(key->param.hex_key2);
2166 		if (!key->key2) {
2167 			SPDK_ERRLOG("Failed to unhexlify key2\n");
2168 			rc = -EINVAL;
2169 			goto error;
2170 		}
2171 	}
2172 
2173 	key->tweak_mode = ACCEL_CRYPTO_TWEAK_MODE_DEFAULT;
2174 	if (param->tweak_mode) {
2175 		found = false;
2176 
2177 		key->param.tweak_mode = strdup(param->tweak_mode);
2178 		if (!key->param.tweak_mode) {
2179 			rc = -ENOMEM;
2180 			goto error;
2181 		}
2182 
2183 		for (i = 0; i < SPDK_COUNTOF(g_tweak_modes); ++i) {
2184 			assert(g_tweak_modes[i]);
2185 
2186 			if (strncmp(param->tweak_mode, g_tweak_modes[i], strlen(g_tweak_modes[i])) == 0) {
2187 				key->tweak_mode = i;
2188 				found = true;
2189 				break;
2190 			}
2191 		}
2192 
2193 		if (!found) {
2194 			SPDK_ERRLOG("Failed to parse tweak mode\n");
2195 			rc = -EINVAL;
2196 			goto error;
2197 		}
2198 	}
2199 
2200 	if ((!module->crypto_supports_tweak_mode && key->tweak_mode != ACCEL_CRYPTO_TWEAK_MODE_DEFAULT) ||
2201 	    (module->crypto_supports_tweak_mode && !module->crypto_supports_tweak_mode(key->tweak_mode))) {
2202 		SPDK_ERRLOG("Module %s doesn't support %s tweak mode\n", module->name,
2203 			    g_tweak_modes[key->tweak_mode]);
2204 		rc = -EINVAL;
2205 		goto error;
2206 	}
2207 
2208 	if (!module->crypto_supports_cipher(key->cipher, key->key_size)) {
2209 		SPDK_ERRLOG("Module %s doesn't support %s cipher with %zu key size\n", module->name,
2210 			    g_ciphers[key->cipher], key->key_size);
2211 		rc = -EINVAL;
2212 		goto error;
2213 	}
2214 
2215 	if (key->cipher == SPDK_ACCEL_CIPHER_AES_XTS) {
2216 		if (!key->key2) {
2217 			SPDK_ERRLOG("%s key2 is missing\n", g_ciphers[key->cipher]);
2218 			rc = -EINVAL;
2219 			goto error;
2220 		}
2221 
2222 		if (key->key_size != key->key2_size) {
2223 			SPDK_ERRLOG("%s key size %zu is not equal to key2 size %zu\n", g_ciphers[key->cipher],
2224 				    key->key_size,
2225 				    key->key2_size);
2226 			rc = -EINVAL;
2227 			goto error;
2228 		}
2229 
2230 		if (accel_aes_xts_keys_equal(key->key, key->key_size, key->key2, key->key2_size)) {
2231 			SPDK_ERRLOG("%s identical keys are not secure\n", g_ciphers[key->cipher]);
2232 			rc = -EINVAL;
2233 			goto error;
2234 		}
2235 	}
2236 
2237 	if (key->cipher == SPDK_ACCEL_CIPHER_AES_CBC) {
2238 		if (key->key2_size) {
2239 			SPDK_ERRLOG("%s doesn't use key2\n", g_ciphers[key->cipher]);
2240 			rc = -EINVAL;
2241 			goto error;
2242 		}
2243 	}
2244 
2245 	key->module_if = module;
2246 
2247 	spdk_spin_lock(&g_keyring_spin);
2248 	if (_accel_crypto_key_get(param->key_name)) {
2249 		rc = -EEXIST;
2250 	} else {
2251 		rc = module->crypto_key_init(key);
2252 		if (rc) {
2253 			SPDK_ERRLOG("Module %s failed to initialize crypto key\n", module->name);
2254 		} else {
2255 			TAILQ_INSERT_TAIL(&g_keyring, key, link);
2256 		}
2257 	}
2258 	spdk_spin_unlock(&g_keyring_spin);
2259 
2260 	if (rc) {
2261 		goto error;
2262 	}
2263 
2264 	return 0;
2265 
2266 error:
2267 	accel_crypto_key_free_mem(key);
2268 	return rc;
2269 }
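
/*
 * Example (illustrative only; the key material shown is obviously not meant to be used):
 *
 *	struct spdk_accel_crypto_key_create_param param = {
 *		.cipher = "AES_XTS",
 *		.hex_key = "00112233445566778899aabbccddeeff",
 *		.hex_key2 = "ffeeddccbbaa99887766554433221100",
 *		.key_name = "key0",
 *	};
 *
 *	rc = spdk_accel_crypto_key_create(&param);
 */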
2270 
2271 int
2272 spdk_accel_crypto_key_destroy(struct spdk_accel_crypto_key *key)
2273 {
2274 	if (!key || !key->module_if) {
2275 		return -EINVAL;
2276 	}
2277 
2278 	spdk_spin_lock(&g_keyring_spin);
2279 	if (!_accel_crypto_key_get(key->param.key_name)) {
2280 		spdk_spin_unlock(&g_keyring_spin);
2281 		return -ENOENT;
2282 	}
2283 	TAILQ_REMOVE(&g_keyring, key, link);
2284 	spdk_spin_unlock(&g_keyring_spin);
2285 
2286 	accel_crypto_key_destroy_unsafe(key);
2287 
2288 	return 0;
2289 }
2290 
2291 struct spdk_accel_crypto_key *
2292 spdk_accel_crypto_key_get(const char *name)
2293 {
2294 	struct spdk_accel_crypto_key *key;
2295 
2296 	spdk_spin_lock(&g_keyring_spin);
2297 	key = _accel_crypto_key_get(name);
2298 	spdk_spin_unlock(&g_keyring_spin);
2299 
2300 	return key;
2301 }
2302 
2303 /* Helper function used when accel modules register with the framework. */
2304 void
2305 spdk_accel_module_list_add(struct spdk_accel_module_if *accel_module)
2306 {
2307 	struct spdk_accel_module_if *tmp;
2308 
2309 	if (_module_find_by_name(accel_module->name)) {
2310 		SPDK_NOTICELOG("Module %s already registered\n", accel_module->name);
2311 		assert(false);
2312 		return;
2313 	}
2314 
2315 	TAILQ_FOREACH(tmp, &spdk_accel_module_list, tailq) {
2316 		if (accel_module->priority < tmp->priority) {
2317 			break;
2318 		}
2319 	}
2320 
2321 	if (tmp != NULL) {
2322 		TAILQ_INSERT_BEFORE(tmp, accel_module, tailq);
2323 	} else {
2324 		TAILQ_INSERT_TAIL(&spdk_accel_module_list, accel_module, tailq);
2325 	}
2326 }
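
/*
 * Modules don't usually call spdk_accel_module_list_add() directly; they register at
 * load time through a constructor, typically the SPDK_ACCEL_MODULE_REGISTER() helper
 * from spdk/accel_module.h.
 */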
2327 
2328 /* Framework level channel create callback. */
2329 static int
2330 accel_create_channel(void *io_device, void *ctx_buf)
2331 {
2332 	struct accel_io_channel	*accel_ch = ctx_buf;
2333 	struct spdk_accel_task *accel_task;
2334 	struct spdk_accel_sequence *seq;
2335 	struct accel_buffer *buf;
2336 	uint8_t *task_mem;
2337 	uint32_t i = 0, j;
2338 	int rc;
2339 
2340 	accel_ch->task_pool_base = calloc(g_opts.task_count, g_max_accel_module_size);
2341 	if (accel_ch->task_pool_base == NULL) {
2342 		return -ENOMEM;
2343 	}
2344 
2345 	accel_ch->seq_pool_base = calloc(g_opts.sequence_count, sizeof(struct spdk_accel_sequence));
2346 	if (accel_ch->seq_pool_base == NULL) {
2347 		goto err;
2348 	}
2349 
2350 	accel_ch->buf_pool_base = calloc(g_opts.buf_count, sizeof(struct accel_buffer));
2351 	if (accel_ch->buf_pool_base == NULL) {
2352 		goto err;
2353 	}
2354 
2355 	TAILQ_INIT(&accel_ch->task_pool);
2356 	TAILQ_INIT(&accel_ch->seq_pool);
2357 	TAILQ_INIT(&accel_ch->buf_pool);
2358 
2359 	task_mem = accel_ch->task_pool_base;
2360 	for (i = 0; i < g_opts.task_count; i++) {
2361 		accel_task = (struct spdk_accel_task *)task_mem;
2362 		TAILQ_INSERT_TAIL(&accel_ch->task_pool, accel_task, link);
2363 		task_mem += g_max_accel_module_size;
2364 	}
2365 	for (i = 0; i < g_opts.sequence_count; i++) {
2366 		seq = &accel_ch->seq_pool_base[i];
2367 		TAILQ_INSERT_TAIL(&accel_ch->seq_pool, seq, link);
2368 	}
2369 	for (i = 0; i < g_opts.buf_count; i++) {
2370 		buf = &accel_ch->buf_pool_base[i];
2371 		TAILQ_INSERT_TAIL(&accel_ch->buf_pool, buf, link);
2372 	}
2373 
2374 	/* Assign modules and get IO channels for each */
2375 	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
2376 		accel_ch->module_ch[i] = g_modules_opc[i].module->get_io_channel();
2377 		/* This can happen if idxd runs out of channels. */
2378 		if (accel_ch->module_ch[i] == NULL) {
2379 			SPDK_ERRLOG("Module %s failed to get io channel\n", g_modules_opc[i].module->name);
2380 			goto err;
2381 		}
2382 	}
2383 
2384 	if (g_accel_driver != NULL) {
2385 		accel_ch->driver_channel = g_accel_driver->get_io_channel();
2386 		if (accel_ch->driver_channel == NULL) {
2387 			SPDK_ERRLOG("Failed to get driver's IO channel\n");
2388 			goto err;
2389 		}
2390 	}
2391 
2392 	rc = spdk_iobuf_channel_init(&accel_ch->iobuf, "accel", g_opts.small_cache_size,
2393 				     g_opts.large_cache_size);
2394 	if (rc != 0) {
2395 		SPDK_ERRLOG("Failed to initialize iobuf accel channel\n");
2396 		goto err;
2397 	}
2398 
2399 	return 0;
2400 err:
2401 	if (accel_ch->driver_channel != NULL) {
2402 		spdk_put_io_channel(accel_ch->driver_channel);
2403 	}
2404 	for (j = 0; j < i; j++) {
2405 		spdk_put_io_channel(accel_ch->module_ch[j]);
2406 	}
2407 	free(accel_ch->task_pool_base);
2408 	free(accel_ch->seq_pool_base);
2409 	free(accel_ch->buf_pool_base);
2410 
2411 	return -ENOMEM;
2412 }
2413 
2414 static void
2415 accel_add_stats(struct accel_stats *total, struct accel_stats *stats)
2416 {
2417 	int i;
2418 
2419 	total->sequence_executed += stats->sequence_executed;
2420 	total->sequence_failed += stats->sequence_failed;
2421 	total->retry.task += stats->retry.task;
2422 	total->retry.sequence += stats->retry.sequence;
2423 	total->retry.iobuf += stats->retry.iobuf;
2424 	total->retry.bufdesc += stats->retry.bufdesc;
2425 	for (i = 0; i < SPDK_ACCEL_OPC_LAST; ++i) {
2426 		total->operations[i].executed += stats->operations[i].executed;
2427 		total->operations[i].failed += stats->operations[i].failed;
2428 		total->operations[i].num_bytes += stats->operations[i].num_bytes;
2429 	}
2430 }
2431 
2432 /* Framework level channel destroy callback. */
2433 static void
2434 accel_destroy_channel(void *io_device, void *ctx_buf)
2435 {
2436 	struct accel_io_channel	*accel_ch = ctx_buf;
2437 	int i;
2438 
2439 	spdk_iobuf_channel_fini(&accel_ch->iobuf);
2440 
2441 	if (accel_ch->driver_channel != NULL) {
2442 		spdk_put_io_channel(accel_ch->driver_channel);
2443 	}
2444 
2445 	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
2446 		assert(accel_ch->module_ch[i] != NULL);
2447 		spdk_put_io_channel(accel_ch->module_ch[i]);
2448 		accel_ch->module_ch[i] = NULL;
2449 	}
2450 
2451 	/* Update global stats to make sure the channel's stats aren't lost after the channel is gone */
2452 	spdk_spin_lock(&g_stats_lock);
2453 	accel_add_stats(&g_stats, &accel_ch->stats);
2454 	spdk_spin_unlock(&g_stats_lock);
2455 
2456 	free(accel_ch->task_pool_base);
2457 	free(accel_ch->seq_pool_base);
2458 	free(accel_ch->buf_pool_base);
2459 }
2460 
2461 struct spdk_io_channel *
2462 spdk_accel_get_io_channel(void)
2463 {
2464 	return spdk_get_io_channel(&spdk_accel_module_list);
2465 }
2466 
2467 static int
2468 accel_module_initialize(void)
2469 {
2470 	struct spdk_accel_module_if *accel_module, *tmp_module;
2471 	int rc = 0, module_rc;
2472 
2473 	TAILQ_FOREACH_SAFE(accel_module, &spdk_accel_module_list, tailq, tmp_module) {
2474 		module_rc = accel_module->module_init();
2475 		if (module_rc) {
2476 			SPDK_ERRLOG("Module %s initialization failed with %d\n", accel_module->name, module_rc);
2477 			TAILQ_REMOVE(&spdk_accel_module_list, accel_module, tailq);
2478 			if (!rc) {
2479 				rc = module_rc;
2480 			}
2481 			continue;
2482 		}
2483 		SPDK_DEBUGLOG(accel, "Module %s initialized.\n", accel_module->name);
2484 	}
2485 
2486 	return rc;
2487 }
2488 
2489 static void
2490 accel_module_init_opcode(enum spdk_accel_opcode opcode)
2491 {
2492 	struct accel_module *module = &g_modules_opc[opcode];
2493 	struct spdk_accel_module_if *module_if = module->module;
2494 
2495 	if (module_if->get_memory_domains != NULL) {
2496 		module->supports_memory_domains = module_if->get_memory_domains(NULL, 0) > 0;
2497 	}
2498 }
2499 
2500 int
2501 spdk_accel_initialize(void)
2502 {
2503 	enum spdk_accel_opcode op;
2504 	struct spdk_accel_module_if *accel_module = NULL;
2505 	int rc;
2506 
2507 	/*
2508 	 * We need a unique identifier for the accel framework, so use the
2509 	 * spdk_accel_module_list address for this purpose.
2510 	 */
2511 	spdk_io_device_register(&spdk_accel_module_list, accel_create_channel, accel_destroy_channel,
2512 				sizeof(struct accel_io_channel), "accel");
2513 
2514 	spdk_spin_init(&g_keyring_spin);
2515 	spdk_spin_init(&g_stats_lock);
2516 
2517 	rc = spdk_memory_domain_create(&g_accel_domain, SPDK_DMA_DEVICE_TYPE_ACCEL, NULL,
2518 				       "SPDK_ACCEL_DMA_DEVICE");
2519 	if (rc != 0) {
2520 		SPDK_ERRLOG("Failed to create accel memory domain\n");
2521 		return rc;
2522 	}
2523 
2524 	g_modules_started = true;
2525 	rc = accel_module_initialize();
2526 	if (rc) {
2527 		return rc;
2528 	}
2529 
2530 	if (g_accel_driver != NULL && g_accel_driver->init != NULL) {
2531 		rc = g_accel_driver->init();
2532 		if (rc != 0) {
2533 			SPDK_ERRLOG("Failed to initialize driver %s: %s\n", g_accel_driver->name,
2534 				    spdk_strerror(-rc));
2535 			return rc;
2536 		}
2537 	}
2538 
2539 	/* The module list is ordered by priority, with the highest priority modules at the end
2540 	 * of the list.  The software module should be somewhere at the beginning of the list,
2541 	 * before all HW modules.
2542 	 * NOTE: all opcodes must be supported by the software module in the event that no HW
2543 	 * modules are initialized to support the operation.
2544 	 */
2545 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
2546 		for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
2547 			if (accel_module->supports_opcode(op)) {
2548 				g_modules_opc[op].module = accel_module;
2549 				SPDK_DEBUGLOG(accel, "OPC 0x%x now assigned to %s\n", op, accel_module->name);
2550 			}
2551 		}
2552 
2553 		if (accel_module->get_ctx_size != NULL) {
2554 			g_max_accel_module_size = spdk_max(g_max_accel_module_size,
2555 							   accel_module->get_ctx_size());
2556 		}
2557 	}
2558 
2559 	/* Now let's check for overrides and apply any that exist */
2560 	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
2561 		if (g_modules_opc_override[op] != NULL) {
2562 			accel_module = _module_find_by_name(g_modules_opc_override[op]);
2563 			if (accel_module == NULL) {
2564 				SPDK_ERRLOG("Invalid module name of %s\n", g_modules_opc_override[op]);
2565 				return -EINVAL;
2566 			}
2567 			if (accel_module->supports_opcode(op) == false) {
2568 				SPDK_ERRLOG("Module %s does not support op code %d\n", accel_module->name, op);
2569 				return -EINVAL;
2570 			}
2571 			g_modules_opc[op].module = accel_module;
2572 		}
2573 	}
2574 
2575 	if (g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module != g_modules_opc[SPDK_ACCEL_OPC_DECRYPT].module) {
2576 		SPDK_ERRLOG("Different accel modules are assigned to encrypt and decrypt operations\n");
2577 		return -EINVAL;
2578 	}
2579 
2580 	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
2581 		assert(g_modules_opc[op].module != NULL);
2582 		accel_module_init_opcode(op);
2583 	}
2584 
2585 	rc = spdk_iobuf_register_module("accel");
2586 	if (rc != 0) {
2587 		SPDK_ERRLOG("Failed to register accel iobuf module\n");
2588 		return rc;
2589 	}
2590 
2591 	return 0;
2592 }
2593 
2594 static void
2595 accel_module_finish_cb(void)
2596 {
2597 	spdk_accel_fini_cb cb_fn = g_fini_cb_fn;
2598 
2599 	cb_fn(g_fini_cb_arg);
2600 	g_fini_cb_fn = NULL;
2601 	g_fini_cb_arg = NULL;
2602 }
2603 
2604 static void
2605 accel_write_overridden_opc(struct spdk_json_write_ctx *w, const char *opc_str,
2606 			   const char *module_str)
2607 {
2608 	spdk_json_write_object_begin(w);
2609 	spdk_json_write_named_string(w, "method", "accel_assign_opc");
2610 	spdk_json_write_named_object_begin(w, "params");
2611 	spdk_json_write_named_string(w, "opname", opc_str);
2612 	spdk_json_write_named_string(w, "module", module_str);
2613 	spdk_json_write_object_end(w);
2614 	spdk_json_write_object_end(w);
2615 }
2616 
2617 static void
2618 __accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
2619 {
2620 	spdk_json_write_named_string(w, "name", key->param.key_name);
2621 	spdk_json_write_named_string(w, "cipher", key->param.cipher);
2622 	spdk_json_write_named_string(w, "key", key->param.hex_key);
2623 	if (key->param.hex_key2) {
2624 		spdk_json_write_named_string(w, "key2", key->param.hex_key2);
2625 	}
2626 
2627 	if (key->param.tweak_mode) {
2628 		spdk_json_write_named_string(w, "tweak_mode", key->param.tweak_mode);
2629 	}
2630 }
2631 
2632 void
2633 _accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
2634 {
2635 	spdk_json_write_object_begin(w);
2636 	__accel_crypto_key_dump_param(w, key);
2637 	spdk_json_write_object_end(w);
2638 }
2639 
2640 static void
2641 _accel_crypto_key_write_config_json(struct spdk_json_write_ctx *w,
2642 				    struct spdk_accel_crypto_key *key)
2643 {
2644 	spdk_json_write_object_begin(w);
2645 	spdk_json_write_named_string(w, "method", "accel_crypto_key_create");
2646 	spdk_json_write_named_object_begin(w, "params");
2647 	__accel_crypto_key_dump_param(w, key);
2648 	spdk_json_write_object_end(w);
2649 	spdk_json_write_object_end(w);
2650 }
2651 
2652 static void
2653 accel_write_options(struct spdk_json_write_ctx *w)
2654 {
2655 	spdk_json_write_object_begin(w);
2656 	spdk_json_write_named_string(w, "method", "accel_set_options");
2657 	spdk_json_write_named_object_begin(w, "params");
2658 	spdk_json_write_named_uint32(w, "small_cache_size", g_opts.small_cache_size);
2659 	spdk_json_write_named_uint32(w, "large_cache_size", g_opts.large_cache_size);
2660 	spdk_json_write_named_uint32(w, "task_count", g_opts.task_count);
2661 	spdk_json_write_named_uint32(w, "sequence_count", g_opts.sequence_count);
2662 	spdk_json_write_named_uint32(w, "buf_count", g_opts.buf_count);
2663 	spdk_json_write_object_end(w);
2664 	spdk_json_write_object_end(w);
2665 }
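
/*
 * The resulting JSON is shaped as follows (the values shown are the defaults):
 *
 *	{
 *	  "method": "accel_set_options",
 *	  "params": {
 *	    "small_cache_size": 128,
 *	    "large_cache_size": 16,
 *	    "task_count": 2048,
 *	    "sequence_count": 2048,
 *	    "buf_count": 2048
 *	  }
 *	}
 */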
2666 
2667 static void
2668 _accel_crypto_keys_write_config_json(struct spdk_json_write_ctx *w, bool full_dump)
2669 {
2670 	struct spdk_accel_crypto_key *key;
2671 
2672 	spdk_spin_lock(&g_keyring_spin);
2673 	TAILQ_FOREACH(key, &g_keyring, link) {
2674 		if (full_dump) {
2675 			_accel_crypto_key_write_config_json(w, key);
2676 		} else {
2677 			_accel_crypto_key_dump_param(w, key);
2678 		}
2679 	}
2680 	spdk_spin_unlock(&g_keyring_spin);
2681 }
2682 
2683 void
2684 _accel_crypto_keys_dump_param(struct spdk_json_write_ctx *w)
2685 {
2686 	_accel_crypto_keys_write_config_json(w, false);
2687 }
2688 
2689 void
2690 spdk_accel_write_config_json(struct spdk_json_write_ctx *w)
2691 {
2692 	struct spdk_accel_module_if *accel_module;
2693 	int i;
2694 
2695 	spdk_json_write_array_begin(w);
2696 	accel_write_options(w);
2697 
2698 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
2699 		if (accel_module->write_config_json) {
2700 			accel_module->write_config_json(w);
2701 		}
2702 	}
2703 	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
2704 		if (g_modules_opc_override[i]) {
2705 			accel_write_overridden_opc(w, g_opcode_strings[i], g_modules_opc_override[i]);
2706 		}
2707 	}
2708 
2709 	_accel_crypto_keys_write_config_json(w, true);
2710 
2711 	spdk_json_write_array_end(w);
2712 }
2713 
2714 void
2715 spdk_accel_module_finish(void)
2716 {
2717 	if (!g_accel_module) {
2718 		g_accel_module = TAILQ_FIRST(&spdk_accel_module_list);
2719 	} else {
2720 		g_accel_module = TAILQ_NEXT(g_accel_module, tailq);
2721 	}
2722 
2723 	if (!g_accel_module) {
2724 		if (g_accel_driver != NULL && g_accel_driver->fini != NULL) {
2725 			g_accel_driver->fini();
2726 		}
2727 
2728 		spdk_spin_destroy(&g_keyring_spin);
2729 		spdk_spin_destroy(&g_stats_lock);
2730 		if (g_accel_domain) {
2731 			spdk_memory_domain_destroy(g_accel_domain);
2732 			g_accel_domain = NULL;
2733 		}
2734 		accel_module_finish_cb();
2735 		return;
2736 	}
2737 
2738 	if (g_accel_module->module_fini) {
2739 		spdk_thread_send_msg(spdk_get_thread(), g_accel_module->module_fini, NULL);
2740 	} else {
2741 		spdk_accel_module_finish();
2742 	}
2743 }
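
/*
 * Note that spdk_accel_module_finish() walks the module list one module at a time: each
 * module's module_fini() callback is expected to call spdk_accel_module_finish() again
 * once its cleanup completes, which advances g_accel_module to the next entry.
 */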
2744 
2745 static void
2746 accel_io_device_unregister_cb(void *io_device)
2747 {
2748 	struct spdk_accel_crypto_key *key, *key_tmp;
2749 	enum spdk_accel_opcode op;
2750 
2751 	spdk_spin_lock(&g_keyring_spin);
2752 	TAILQ_FOREACH_SAFE(key, &g_keyring, link, key_tmp) {
2753 		accel_crypto_key_destroy_unsafe(key);
2754 	}
2755 	spdk_spin_unlock(&g_keyring_spin);
2756 
2757 	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
2758 		if (g_modules_opc_override[op] != NULL) {
2759 			free(g_modules_opc_override[op]);
2760 			g_modules_opc_override[op] = NULL;
2761 		}
2762 		g_modules_opc[op].module = NULL;
2763 	}
2764 
2765 	spdk_accel_module_finish();
2766 }
2767 
2768 void
2769 spdk_accel_finish(spdk_accel_fini_cb cb_fn, void *cb_arg)
2770 {
2771 	assert(cb_fn != NULL);
2772 
2773 	g_fini_cb_fn = cb_fn;
2774 	g_fini_cb_arg = cb_arg;
2775 
2776 	spdk_io_device_unregister(&spdk_accel_module_list, accel_io_device_unregister_cb);
2777 }
2778 
2779 static struct spdk_accel_driver *
2780 accel_find_driver(const char *name)
2781 {
2782 	struct spdk_accel_driver *driver;
2783 
2784 	TAILQ_FOREACH(driver, &g_accel_drivers, tailq) {
2785 		if (strcmp(driver->name, name) == 0) {
2786 			return driver;
2787 		}
2788 	}
2789 
2790 	return NULL;
2791 }
2792 
2793 int
2794 spdk_accel_set_driver(const char *name)
2795 {
2796 	struct spdk_accel_driver *driver;
2797 
2798 	driver = accel_find_driver(name);
2799 	if (driver == NULL) {
2800 		SPDK_ERRLOG("Couldn't find driver named '%s'\n", name);
2801 		return -ENODEV;
2802 	}
2803 
2804 	g_accel_driver = driver;
2805 
2806 	return 0;
2807 }
2808 
2809 void
2810 spdk_accel_driver_register(struct spdk_accel_driver *driver)
2811 {
2812 	if (accel_find_driver(driver->name)) {
2813 		SPDK_ERRLOG("Driver named '%s' has already been registered\n", driver->name);
2814 		assert(0);
2815 		return;
2816 	}
2817 
2818 	TAILQ_INSERT_TAIL(&g_accel_drivers, driver, tailq);
2819 }
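
/*
 * Like modules, drivers are normally registered from a constructor (see the
 * SPDK_ACCEL_DRIVER_REGISTER() helper in spdk/accel_module.h) and then selected
 * explicitly with spdk_accel_set_driver().
 */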
2820 
2821 int
2822 spdk_accel_set_opts(const struct spdk_accel_opts *opts)
2823 {
2824 	if (opts->size > sizeof(*opts)) {
2825 		return -EINVAL;
2826 	}
2827 
2828 	memcpy(&g_opts, opts, opts->size);
2829 
2830 	return 0;
2831 }
2832 
2833 void
2834 spdk_accel_get_opts(struct spdk_accel_opts *opts)
2835 {
2836 	size_t size = opts->size;
2837 
2838 	assert(size <= sizeof(*opts));
2839 
2840 	memcpy(opts, &g_opts, spdk_min(sizeof(*opts), size));
2841 	opts->size = size;
2842 }
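
/*
 * The size field makes the opts struct extensible: callers set opts->size before calling
 * spdk_accel_get_opts(), so only the fields both sides know about are exchanged.  A
 * minimal sketch:
 *
 *	struct spdk_accel_opts opts = { .size = sizeof(opts) };
 *
 *	spdk_accel_get_opts(&opts);
 *	opts.task_count = 4096;
 *	spdk_accel_set_opts(&opts);
 */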
2843 
2844 struct accel_get_stats_ctx {
2845 	struct accel_stats	stats;
2846 	accel_get_stats_cb	cb_fn;
2847 	void			*cb_arg;
2848 };
2849 
2850 static void
2851 accel_get_channel_stats_done(struct spdk_io_channel_iter *iter, int status)
2852 {
2853 	struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);
2854 
2855 	ctx->cb_fn(&ctx->stats, ctx->cb_arg);
2856 	free(ctx);
2857 }
2858 
2859 static void
2860 accel_get_channel_stats(struct spdk_io_channel_iter *iter)
2861 {
2862 	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(iter);
2863 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
2864 	struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);
2865 
2866 	accel_add_stats(&ctx->stats, &accel_ch->stats);
2867 	spdk_for_each_channel_continue(iter, 0);
2868 }
2869 
2870 int
2871 accel_get_stats(accel_get_stats_cb cb_fn, void *cb_arg)
2872 {
2873 	struct accel_get_stats_ctx *ctx;
2874 
2875 	ctx = calloc(1, sizeof(*ctx));
2876 	if (ctx == NULL) {
2877 		return -ENOMEM;
2878 	}
2879 
2880 	spdk_spin_lock(&g_stats_lock);
2881 	accel_add_stats(&ctx->stats, &g_stats);
2882 	spdk_spin_unlock(&g_stats_lock);
2883 
2884 	ctx->cb_fn = cb_fn;
2885 	ctx->cb_arg = cb_arg;
2886 
2887 	spdk_for_each_channel(&spdk_accel_module_list, accel_get_channel_stats, ctx,
2888 			      accel_get_channel_stats_done);
2889 
2890 	return 0;
2891 }
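
/*
 * Usage sketch (dump_stats_done() is hypothetical; the accel_get_stats_cb prototype is
 * declared in accel_internal.h):
 *
 *	static void
 *	dump_stats_done(struct accel_stats *stats, void *cb_arg)
 *	{
 *		printf("sequences executed: %" PRIu64 "\n", stats->sequence_executed);
 *	}
 *
 *	accel_get_stats(dump_stats_done, NULL);
 */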
2892 
2893 void
2894 spdk_accel_get_opcode_stats(struct spdk_io_channel *ch, enum spdk_accel_opcode opcode,
2895 			    struct spdk_accel_opcode_stats *stats, size_t size)
2896 {
2897 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
2898 
2899 #define FIELD_OK(field) \
2900 	offsetof(struct spdk_accel_opcode_stats, field) + sizeof(stats->field) <= size
2901 
2902 #define SET_FIELD(field, value) \
2903 	if (FIELD_OK(field)) { \
2904 		stats->field = value; \
2905 	}
2906 
2907 	SET_FIELD(executed, accel_ch->stats.operations[opcode].executed);
2908 	SET_FIELD(failed, accel_ch->stats.operations[opcode].failed);
2909 	SET_FIELD(num_bytes, accel_ch->stats.operations[opcode].num_bytes);
2910 
2911 #undef FIELD_OK
2912 #undef SET_FIELD
2913 }
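
/*
 * The size argument serves the same compatibility purpose as opts->size above: only the
 * fields that fit within it are filled in.  For example:
 *
 *	struct spdk_accel_opcode_stats stats;
 *
 *	spdk_accel_get_opcode_stats(ch, SPDK_ACCEL_OPC_CRC32C, &stats, sizeof(stats));
 */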
2914 
2915 uint8_t
2916 spdk_accel_get_buf_align(enum spdk_accel_opcode opcode,
2917 			 const struct spdk_accel_operation_exec_ctx *ctx)
2918 {
2919 	struct spdk_accel_module_if *module = g_modules_opc[opcode].module;
2920 	struct spdk_accel_opcode_info modinfo = {}, drvinfo = {};
2921 
2922 	if (g_accel_driver != NULL && g_accel_driver->get_operation_info != NULL) {
2923 		g_accel_driver->get_operation_info(opcode, ctx, &drvinfo);
2924 	}
2925 
2926 	if (module->get_operation_info != NULL) {
2927 		module->get_operation_info(opcode, ctx, &modinfo);
2928 	}
2929 
2930 	/* If a driver is set, it'll execute most of the operations, while the rest will usually
2931 	 * fall back to accel_sw, which doesn't have any alignment requirements.  However, to be
2932 	 * extra safe, return the max(driver, module) if a driver delegates some operations to a
2933 	 * hardware module. */
2934 	return spdk_max(modinfo.required_alignment, drvinfo.required_alignment);
2935 }
2936 
2937 struct spdk_accel_module_if *
2938 spdk_accel_get_module(const char *name)
2939 {
2940 	struct spdk_accel_module_if *module;
2941 
2942 	TAILQ_FOREACH(module, &spdk_accel_module_list, tailq) {
2943 		if (strcmp(module->name, name) == 0) {
2944 			return module;
2945 		}
2946 	}
2947 
2948 	return NULL;
2949 }
2950 
2951 SPDK_LOG_REGISTER_COMPONENT(accel)
2952