1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2020 Intel Corporation.
3  *   Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES.
4  *   All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "spdk_internal/accel_module.h"
10 
11 #include "accel_internal.h"
12 
13 #include "spdk/dma.h"
14 #include "spdk/env.h"
15 #include "spdk/likely.h"
16 #include "spdk/log.h"
17 #include "spdk/thread.h"
18 #include "spdk/json.h"
19 #include "spdk/crc32.h"
20 #include "spdk/util.h"
21 #include "spdk/hexlify.h"
22 
23 /* Accelerator Framework: The following provides a top-level,
24  * generic API for the accelerator functions defined here. Modules,
25  * such as the one in /module/accel/ioat, supply the implementation,
26  * with the exception of the pure software implementation contained
27  * later in this file.
28  */
29 
30 #define ALIGN_4K			0x1000
31 #define MAX_TASKS_PER_CHANNEL		0x800
32 #define ACCEL_SMALL_CACHE_SIZE		128
33 #define ACCEL_LARGE_CACHE_SIZE		16
34 /* Set MSB, so we don't return NULL pointers as buffers */
35 #define ACCEL_BUFFER_BASE		((void *)(1ull << 63))
36 #define ACCEL_BUFFER_OFFSET_MASK	((uintptr_t)ACCEL_BUFFER_BASE - 1)
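
/* A minimal sketch of the virtual-buffer arithmetic used by this framework:
 * spdk_accel_get_buf() hands out pointers of the form ACCEL_BUFFER_BASE +
 * offset, and accel_update_virt_iov() later recovers the offset by masking:
 *
 *	void *virt = (char *)ACCEL_BUFFER_BASE + 0x100;
 *	uintptr_t offset = (uintptr_t)virt & ACCEL_BUFFER_OFFSET_MASK;
 *	assert(offset == 0x100);
 */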
37 
38 struct accel_module {
39 	struct spdk_accel_module_if	*module;
40 	bool				supports_memory_domains;
41 };
42 
43 /* Largest context size for all accel modules */
44 static size_t g_max_accel_module_size = sizeof(struct spdk_accel_task);
45 
46 static struct spdk_accel_module_if *g_accel_module = NULL;
47 static spdk_accel_fini_cb g_fini_cb_fn = NULL;
48 static void *g_fini_cb_arg = NULL;
49 static bool g_modules_started = false;
50 static struct spdk_memory_domain *g_accel_domain;
51 
52 /* Global list of registered accelerator modules */
53 static TAILQ_HEAD(, spdk_accel_module_if) spdk_accel_module_list =
54 	TAILQ_HEAD_INITIALIZER(spdk_accel_module_list);
55 
56 /* Crypto keyring */
57 static TAILQ_HEAD(, spdk_accel_crypto_key) g_keyring = TAILQ_HEAD_INITIALIZER(g_keyring);
58 static struct spdk_spinlock g_keyring_spin;
59 
60 /* Global array mapping capabilities to modules */
61 static struct accel_module g_modules_opc[ACCEL_OPC_LAST] = {};
62 static char *g_modules_opc_override[ACCEL_OPC_LAST] = {};
63 TAILQ_HEAD(, spdk_accel_driver) g_accel_drivers = TAILQ_HEAD_INITIALIZER(g_accel_drivers);
64 static struct spdk_accel_driver *g_accel_driver;
65 static struct spdk_accel_opts g_opts = {
66 	.small_cache_size = ACCEL_SMALL_CACHE_SIZE,
67 	.large_cache_size = ACCEL_LARGE_CACHE_SIZE,
68 };
69 static struct accel_stats g_stats;
70 static struct spdk_spinlock g_stats_lock;
71 
72 static const char *g_opcode_strings[ACCEL_OPC_LAST] = {
73 	"copy", "fill", "dualcast", "compare", "crc32c", "copy_crc32c",
74 	"compress", "decompress", "encrypt", "decrypt", "xor"
75 };
76 
77 enum accel_sequence_state {
78 	ACCEL_SEQUENCE_STATE_INIT,
79 	ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF,
80 	ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF,
81 	ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF,
82 	ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF,
83 	ACCEL_SEQUENCE_STATE_PULL_DATA,
84 	ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA,
85 	ACCEL_SEQUENCE_STATE_EXEC_TASK,
86 	ACCEL_SEQUENCE_STATE_AWAIT_TASK,
87 	ACCEL_SEQUENCE_STATE_COMPLETE_TASK,
88 	ACCEL_SEQUENCE_STATE_NEXT_TASK,
89 	ACCEL_SEQUENCE_STATE_PUSH_DATA,
90 	ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA,
91 	ACCEL_SEQUENCE_STATE_DRIVER_EXEC,
92 	ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK,
93 	ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE,
94 	ACCEL_SEQUENCE_STATE_ERROR,
95 	ACCEL_SEQUENCE_STATE_MAX,
96 };
97 
98 static const char *g_seq_states[]
99 __attribute__((unused)) = {
100 	[ACCEL_SEQUENCE_STATE_INIT] = "init",
101 	[ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF] = "check-virtbuf",
102 	[ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF] = "await-virtbuf",
103 	[ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF] = "check-bouncebuf",
104 	[ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF] = "await-bouncebuf",
105 	[ACCEL_SEQUENCE_STATE_PULL_DATA] = "pull-data",
106 	[ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA] = "await-pull-data",
107 	[ACCEL_SEQUENCE_STATE_EXEC_TASK] = "exec-task",
108 	[ACCEL_SEQUENCE_STATE_AWAIT_TASK] = "await-task",
109 	[ACCEL_SEQUENCE_STATE_COMPLETE_TASK] = "complete-task",
110 	[ACCEL_SEQUENCE_STATE_NEXT_TASK] = "next-task",
111 	[ACCEL_SEQUENCE_STATE_PUSH_DATA] = "push-data",
112 	[ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA] = "await-push-data",
113 	[ACCEL_SEQUENCE_STATE_DRIVER_EXEC] = "driver-exec",
114 	[ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK] = "driver-await-task",
115 	[ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE] = "driver-complete",
116 	[ACCEL_SEQUENCE_STATE_ERROR] = "error",
117 	[ACCEL_SEQUENCE_STATE_MAX] = "",
118 };
119 
120 #define ACCEL_SEQUENCE_STATE_STRING(s) \
121 	(((s) >= ACCEL_SEQUENCE_STATE_INIT && (s) < ACCEL_SEQUENCE_STATE_MAX) \
122 	 ? g_seq_states[s] : "unknown")
123 
124 struct accel_buffer {
125 	struct spdk_accel_sequence	*seq;
126 	void				*buf;
127 	uint64_t			len;
128 	struct spdk_iobuf_entry		iobuf;
129 	spdk_accel_sequence_get_buf_cb	cb_fn;
130 	void				*cb_ctx;
131 	TAILQ_ENTRY(accel_buffer)	link;
132 };
133 
134 struct accel_io_channel {
135 	struct spdk_io_channel			*module_ch[ACCEL_OPC_LAST];
136 	void					*task_pool_base;
137 	struct spdk_accel_sequence		*seq_pool_base;
138 	struct accel_buffer			*buf_pool_base;
139 	TAILQ_HEAD(, spdk_accel_task)		task_pool;
140 	TAILQ_HEAD(, spdk_accel_sequence)	seq_pool;
141 	TAILQ_HEAD(, accel_buffer)		buf_pool;
142 	struct spdk_iobuf_channel		iobuf;
143 	struct accel_stats			stats;
144 };
145 
146 TAILQ_HEAD(accel_sequence_tasks, spdk_accel_task);
147 
148 struct spdk_accel_sequence {
149 	struct accel_io_channel			*ch;
150 	struct accel_sequence_tasks		tasks;
151 	struct accel_sequence_tasks		completed;
152 	TAILQ_HEAD(, accel_buffer)		bounce_bufs;
153 	enum accel_sequence_state		state;
154 	int					status;
155 	bool					in_process_sequence;
156 	spdk_accel_completion_cb		cb_fn;
157 	void					*cb_arg;
158 	TAILQ_ENTRY(spdk_accel_sequence)	link;
159 };
160 
161 #define accel_update_stats(ch, event) (ch)->stats.event++
162 #define accel_update_task_stats(ch, task, event) \
163 	accel_update_stats(ch, operations[(task)->op_code].event)
164 
165 static inline void
166 accel_sequence_set_state(struct spdk_accel_sequence *seq, enum accel_sequence_state state)
167 {
168 	SPDK_DEBUGLOG(accel, "seq=%p, setting state: %s -> %s\n", seq,
169 		      ACCEL_SEQUENCE_STATE_STRING(seq->state), ACCEL_SEQUENCE_STATE_STRING(state));
170 	assert(seq->state != ACCEL_SEQUENCE_STATE_ERROR || state == ACCEL_SEQUENCE_STATE_ERROR);
171 	seq->state = state;
172 }
173 
174 static void
175 accel_sequence_set_fail(struct spdk_accel_sequence *seq, int status)
176 {
177 	accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
178 	assert(status != 0);
179 	seq->status = status;
180 }
181 
182 int
183 spdk_accel_get_opc_module_name(enum accel_opcode opcode, const char **module_name)
184 {
185 	if (opcode >= ACCEL_OPC_LAST) {
186 		/* invalid opcode */
187 		return -EINVAL;
188 	}
189 
190 	if (g_modules_opc[opcode].module) {
191 		*module_name = g_modules_opc[opcode].module->name;
192 	} else {
193 		return -ENOENT;
194 	}
195 
196 	return 0;
197 }
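
/* Usage sketch for the query above, assuming the framework has started and
 * modules have been assigned to opcodes:
 *
 *	const char *name;
 *
 *	if (spdk_accel_get_opc_module_name(ACCEL_OPC_CRC32C, &name) == 0) {
 *		printf("crc32c is handled by %s\n", name);
 *	}
 */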
198 
199 void
200 _accel_for_each_module(struct module_info *info, _accel_for_each_module_fn fn)
201 {
202 	struct spdk_accel_module_if *accel_module;
203 	enum accel_opcode opcode;
204 	int j = 0;
205 
206 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
207 		for (opcode = 0; opcode < ACCEL_OPC_LAST; opcode++) {
208 			if (accel_module->supports_opcode(opcode)) {
209 				info->ops[j] = opcode;
210 				j++;
211 			}
212 		}
213 		info->name = accel_module->name;
214 		info->num_ops = j;
215 		fn(info);
216 		j = 0;
217 	}
218 }
219 
220 int
221 _accel_get_opc_name(enum accel_opcode opcode, const char **opcode_name)
222 {
223 	int rc = 0;
224 
225 	if (opcode < ACCEL_OPC_LAST) {
226 		*opcode_name = g_opcode_strings[opcode];
227 	} else {
228 		/* invalid opcode */
229 		rc = -EINVAL;
230 	}
231 
232 	return rc;
233 }
234 
235 int
236 spdk_accel_assign_opc(enum accel_opcode opcode, const char *name)
237 {
238 	if (g_modules_started == true) {
239 		/* we don't allow re-assignment once things have started */
240 		return -EINVAL;
241 	}
242 
243 	if (opcode >= ACCEL_OPC_LAST) {
244 		/* invalid opcode */
245 		return -EINVAL;
246 	}
247 
248 	/* module selection will be validated after the framework starts. */
249 	g_modules_opc_override[opcode] = strdup(name);
250 
251 	return 0;
252 }
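
/* Usage sketch: pin an opcode to a specific module before the framework
 * starts, e.g. from application startup code.  "software" is assumed here to
 * be the registered name of the built-in software module:
 *
 *	int rc = spdk_accel_assign_opc(ACCEL_OPC_COMPRESS, "software");
 */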
253 
254 void
255 spdk_accel_task_complete(struct spdk_accel_task *accel_task, int status)
256 {
257 	struct accel_io_channel *accel_ch = accel_task->accel_ch;
258 	spdk_accel_completion_cb	cb_fn = accel_task->cb_fn;
259 	void				*cb_arg = accel_task->cb_arg;
260 
261 	/* Return the accel_task to the pool first, so the pool cannot be
262 	 * exhausted if the user's completion callback (cb_fn) recursively
263 	 * allocates another accel_task.
264 	 */
265 	TAILQ_INSERT_HEAD(&accel_ch->task_pool, accel_task, link);
266 
267 	accel_update_task_stats(accel_ch, accel_task, executed);
268 	if (spdk_unlikely(status != 0)) {
269 		accel_update_task_stats(accel_ch, accel_task, failed);
270 	}
271 
272 	cb_fn(cb_arg, status);
273 }
274 
275 inline static struct spdk_accel_task *
276 _get_task(struct accel_io_channel *accel_ch, spdk_accel_completion_cb cb_fn, void *cb_arg)
277 {
278 	struct spdk_accel_task *accel_task;
279 
280 	accel_task = TAILQ_FIRST(&accel_ch->task_pool);
281 	if (accel_task == NULL) {
282 		return NULL;
283 	}
284 
285 	TAILQ_REMOVE(&accel_ch->task_pool, accel_task, link);
286 	accel_task->link.tqe_next = NULL;
287 	accel_task->link.tqe_prev = NULL;
288 
289 	accel_task->cb_fn = cb_fn;
290 	accel_task->cb_arg = cb_arg;
291 	accel_task->accel_ch = accel_ch;
292 	accel_task->bounce.s.orig_iovs = NULL;
293 	accel_task->bounce.d.orig_iovs = NULL;
294 
295 	return accel_task;
296 }
297 
298 static inline int
299 accel_submit_task(struct accel_io_channel *accel_ch, struct spdk_accel_task *task)
300 {
301 	struct spdk_io_channel *module_ch = accel_ch->module_ch[task->op_code];
302 	struct spdk_accel_module_if *module = g_modules_opc[task->op_code].module;
303 	int rc;
304 
305 	rc = module->submit_tasks(module_ch, task);
306 	if (spdk_unlikely(rc != 0)) {
307 		accel_update_task_stats(accel_ch, task, failed);
308 	}
309 
310 	return rc;
311 }
312 
313 /* Accel framework public API for copy function */
314 int
315 spdk_accel_submit_copy(struct spdk_io_channel *ch, void *dst, void *src,
316 		       uint64_t nbytes, int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
317 {
318 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
319 	struct spdk_accel_task *accel_task;
320 
321 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
322 	if (accel_task == NULL) {
323 		return -ENOMEM;
324 	}
325 
326 	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
327 	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
328 	accel_task->d.iovs[0].iov_base = dst;
329 	accel_task->d.iovs[0].iov_len = nbytes;
330 	accel_task->d.iovcnt = 1;
331 	accel_task->s.iovs[0].iov_base = src;
332 	accel_task->s.iovs[0].iov_len = nbytes;
333 	accel_task->s.iovcnt = 1;
334 	accel_task->op_code = ACCEL_OPC_COPY;
335 	accel_task->flags = flags;
336 	accel_task->src_domain = NULL;
337 	accel_task->dst_domain = NULL;
338 	accel_task->step_cb_fn = NULL;
339 
340 	return accel_submit_task(accel_ch, accel_task);
341 }
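
/* Minimal usage sketch for the copy API above.  Assumes it runs on an SPDK
 * thread; copy_done and the buffers are placeholder names:
 *
 *	static void
 *	copy_done(void *ctx, int status)
 *	{
 *		assert(status == 0);
 *	}
 *
 *	struct spdk_io_channel *ch = spdk_accel_get_io_channel();
 *	int rc = spdk_accel_submit_copy(ch, dst, src, len, 0, copy_done, NULL);
 *
 * A return value of -ENOMEM means the channel's task pool is temporarily
 * exhausted and the submission should be retried later.
 */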
342 
343 /* Accel framework public API for dualcast copy function */
344 int
345 spdk_accel_submit_dualcast(struct spdk_io_channel *ch, void *dst1,
346 			   void *dst2, void *src, uint64_t nbytes, int flags,
347 			   spdk_accel_completion_cb cb_fn, void *cb_arg)
348 {
349 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
350 	struct spdk_accel_task *accel_task;
351 
352 	if ((uintptr_t)dst1 & (ALIGN_4K - 1) || (uintptr_t)dst2 & (ALIGN_4K - 1)) {
353 		SPDK_ERRLOG("Dualcast requires 4K alignment on dst addresses\n");
354 		return -EINVAL;
355 	}
356 
357 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
358 	if (accel_task == NULL) {
359 		return -ENOMEM;
360 	}
361 
362 	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
363 	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
364 	accel_task->d2.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST2];
365 	accel_task->d.iovs[0].iov_base = dst1;
366 	accel_task->d.iovs[0].iov_len = nbytes;
367 	accel_task->d.iovcnt = 1;
368 	accel_task->d2.iovs[0].iov_base = dst2;
369 	accel_task->d2.iovs[0].iov_len = nbytes;
370 	accel_task->d2.iovcnt = 1;
371 	accel_task->s.iovs[0].iov_base = src;
372 	accel_task->s.iovs[0].iov_len = nbytes;
373 	accel_task->s.iovcnt = 1;
374 	accel_task->flags = flags;
375 	accel_task->op_code = ACCEL_OPC_DUALCAST;
376 	accel_task->src_domain = NULL;
377 	accel_task->dst_domain = NULL;
378 	accel_task->step_cb_fn = NULL;
379 
380 	return accel_submit_task(accel_ch, accel_task);
381 }
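
/* Sketch: both dualcast destinations must be 4 KiB aligned (checked above),
 * so allocate them with an alignment-aware allocator such as
 * spdk_dma_malloc():
 *
 *	void *dst1 = spdk_dma_malloc(len, ALIGN_4K, NULL);
 *	void *dst2 = spdk_dma_malloc(len, ALIGN_4K, NULL);
 *	int rc = spdk_accel_submit_dualcast(ch, dst1, dst2, src, len, 0,
 *					    cb_fn, cb_arg);
 */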
382 
383 /* Accel framework public API for compare function */
384 int
385 spdk_accel_submit_compare(struct spdk_io_channel *ch, void *src1,
386 			  void *src2, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
387 			  void *cb_arg)
388 {
389 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
390 	struct spdk_accel_task *accel_task;
391 
392 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
393 	if (accel_task == NULL) {
394 		return -ENOMEM;
395 	}
396 
397 	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
398 	accel_task->s2.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC2];
399 	accel_task->s.iovs[0].iov_base = src1;
400 	accel_task->s.iovs[0].iov_len = nbytes;
401 	accel_task->s.iovcnt = 1;
402 	accel_task->s2.iovs[0].iov_base = src2;
403 	accel_task->s2.iovs[0].iov_len = nbytes;
404 	accel_task->s2.iovcnt = 1;
405 	accel_task->op_code = ACCEL_OPC_COMPARE;
406 	accel_task->src_domain = NULL;
407 	accel_task->dst_domain = NULL;
408 	accel_task->step_cb_fn = NULL;
409 
410 	return accel_submit_task(accel_ch, accel_task);
411 }
412 
413 /* Accel framework public API for fill function */
414 int
415 spdk_accel_submit_fill(struct spdk_io_channel *ch, void *dst,
416 		       uint8_t fill, uint64_t nbytes, int flags,
417 		       spdk_accel_completion_cb cb_fn, void *cb_arg)
418 {
419 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
420 	struct spdk_accel_task *accel_task;
421 
422 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
423 	if (accel_task == NULL) {
424 		return -ENOMEM;
425 	}
426 
427 	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
428 	accel_task->d.iovs[0].iov_base = dst;
429 	accel_task->d.iovs[0].iov_len = nbytes;
430 	accel_task->d.iovcnt = 1;
431 	memset(&accel_task->fill_pattern, fill, sizeof(uint64_t));
432 	accel_task->flags = flags;
433 	accel_task->op_code = ACCEL_OPC_FILL;
434 	accel_task->src_domain = NULL;
435 	accel_task->dst_domain = NULL;
436 	accel_task->step_cb_fn = NULL;
437 
438 	return accel_submit_task(accel_ch, accel_task);
439 }
440 
441 /* Accel framework public API for CRC-32C function */
442 int
443 spdk_accel_submit_crc32c(struct spdk_io_channel *ch, uint32_t *crc_dst,
444 			 void *src, uint32_t seed, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
445 			 void *cb_arg)
446 {
447 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
448 	struct spdk_accel_task *accel_task;
449 
450 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
451 	if (accel_task == NULL) {
452 		return -ENOMEM;
453 	}
454 
455 	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
456 	accel_task->s.iovs[0].iov_base = src;
457 	accel_task->s.iovs[0].iov_len = nbytes;
458 	accel_task->s.iovcnt = 1;
459 	accel_task->crc_dst = crc_dst;
460 	accel_task->seed = seed;
461 	accel_task->op_code = ACCEL_OPC_CRC32C;
462 	accel_task->src_domain = NULL;
463 	accel_task->dst_domain = NULL;
464 	accel_task->step_cb_fn = NULL;
465 
466 	return accel_submit_task(accel_ch, accel_task);
467 }
468 
469 /* Accel framework public API for chained CRC-32C function */
470 int
471 spdk_accel_submit_crc32cv(struct spdk_io_channel *ch, uint32_t *crc_dst,
472 			  struct iovec *iov, uint32_t iov_cnt, uint32_t seed,
473 			  spdk_accel_completion_cb cb_fn, void *cb_arg)
474 {
475 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
476 	struct spdk_accel_task *accel_task;
477 
478 	if (iov == NULL) {
479 		SPDK_ERRLOG("iov should not be NULL\n");
480 		return -EINVAL;
481 	}
482 
483 	if (!iov_cnt) {
484 		SPDK_ERRLOG("iovcnt should not be zero\n");
485 		return -EINVAL;
486 	}
487 
488 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
489 	if (accel_task == NULL) {
490 		SPDK_ERRLOG("no memory\n");
491 		assert(0);
492 		return -ENOMEM;
493 	}
494 
495 	accel_task->s.iovs = iov;
496 	accel_task->s.iovcnt = iov_cnt;
497 	accel_task->crc_dst = crc_dst;
498 	accel_task->seed = seed;
499 	accel_task->op_code = ACCEL_OPC_CRC32C;
500 	accel_task->src_domain = NULL;
501 	accel_task->dst_domain = NULL;
502 	accel_task->step_cb_fn = NULL;
503 
504 	return accel_submit_task(accel_ch, accel_task);
505 }
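
/* Sketch: CRC-32C over a scattered payload.  The iovec array must remain
 * valid until the completion callback fires, since the task references it
 * directly rather than copying it:
 *
 *	struct iovec iovs[2] = {
 *		{ .iov_base = hdr, .iov_len = hdr_len },
 *		{ .iov_base = payload, .iov_len = payload_len },
 *	};
 *	uint32_t crc;
 *	int rc = spdk_accel_submit_crc32cv(ch, &crc, iovs, 2, ~0u,
 *					   cb_fn, cb_arg);
 */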
506 
507 /* Accel framework public API for copy with CRC-32C function */
508 int
509 spdk_accel_submit_copy_crc32c(struct spdk_io_channel *ch, void *dst,
510 			      void *src, uint32_t *crc_dst, uint32_t seed, uint64_t nbytes,
511 			      int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
512 {
513 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
514 	struct spdk_accel_task *accel_task;
515 
516 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
517 	if (accel_task == NULL) {
518 		return -ENOMEM;
519 	}
520 
521 	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
522 	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
523 	accel_task->d.iovs[0].iov_base = dst;
524 	accel_task->d.iovs[0].iov_len = nbytes;
525 	accel_task->d.iovcnt = 1;
526 	accel_task->s.iovs[0].iov_base = src;
527 	accel_task->s.iovs[0].iov_len = nbytes;
528 	accel_task->s.iovcnt = 1;
529 	accel_task->crc_dst = crc_dst;
530 	accel_task->seed = seed;
531 	accel_task->flags = flags;
532 	accel_task->op_code = ACCEL_OPC_COPY_CRC32C;
533 	accel_task->src_domain = NULL;
534 	accel_task->dst_domain = NULL;
535 	accel_task->step_cb_fn = NULL;
536 
537 	return accel_submit_task(accel_ch, accel_task);
538 }
539 
540 /* Accel framework public API for chained copy + CRC-32C function */
541 int
542 spdk_accel_submit_copy_crc32cv(struct spdk_io_channel *ch, void *dst,
543 			       struct iovec *src_iovs, uint32_t iov_cnt, uint32_t *crc_dst,
544 			       uint32_t seed, int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
545 {
546 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
547 	struct spdk_accel_task *accel_task;
548 	uint64_t nbytes;
549 	uint32_t i;
550 
551 	if (src_iovs == NULL) {
552 		SPDK_ERRLOG("iov should not be NULL\n");
553 		return -EINVAL;
554 	}
555 
556 	if (!iov_cnt) {
557 		SPDK_ERRLOG("iovcnt should not be zero\n");
558 		return -EINVAL;
559 	}
560 
561 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
562 	if (accel_task == NULL) {
563 		SPDK_ERRLOG("no memory\n");
564 		assert(0);
565 		return -ENOMEM;
566 	}
567 
568 	nbytes = 0;
569 	for (i = 0; i < iov_cnt; i++) {
570 		nbytes += src_iovs[i].iov_len;
571 	}
572 
573 	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
574 	accel_task->d.iovs[0].iov_base = dst;
575 	accel_task->d.iovs[0].iov_len = nbytes;
576 	accel_task->d.iovcnt = 1;
577 	accel_task->s.iovs = src_iovs;
578 	accel_task->s.iovcnt = iov_cnt;
579 	accel_task->crc_dst = crc_dst;
580 	accel_task->seed = seed;
581 	accel_task->flags = flags;
582 	accel_task->op_code = ACCEL_OPC_COPY_CRC32C;
583 	accel_task->src_domain = NULL;
584 	accel_task->dst_domain = NULL;
585 	accel_task->step_cb_fn = NULL;
586 
587 	return accel_submit_task(accel_ch, accel_task);
588 }
589 
590 int
591 spdk_accel_submit_compress(struct spdk_io_channel *ch, void *dst, uint64_t nbytes,
592 			   struct iovec *src_iovs, size_t src_iovcnt, uint32_t *output_size, int flags,
593 			   spdk_accel_completion_cb cb_fn, void *cb_arg)
594 {
595 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
596 	struct spdk_accel_task *accel_task;
597 
598 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
599 	if (accel_task == NULL) {
600 		return -ENOMEM;
601 	}
602 
603 	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
604 	accel_task->d.iovs[0].iov_base = dst;
605 	accel_task->d.iovs[0].iov_len = nbytes;
606 	accel_task->d.iovcnt = 1;
607 	accel_task->output_size = output_size;
608 	accel_task->s.iovs = src_iovs;
609 	accel_task->s.iovcnt = src_iovcnt;
610 	accel_task->flags = flags;
611 	accel_task->op_code = ACCEL_OPC_COMPRESS;
612 	accel_task->src_domain = NULL;
613 	accel_task->dst_domain = NULL;
614 	accel_task->step_cb_fn = NULL;
615 
616 	return accel_submit_task(accel_ch, accel_task);
617 }
618 
619 int
620 spdk_accel_submit_decompress(struct spdk_io_channel *ch, struct iovec *dst_iovs,
621 			     size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
622 			     uint32_t *output_size, int flags, spdk_accel_completion_cb cb_fn,
623 			     void *cb_arg)
624 {
625 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
626 	struct spdk_accel_task *accel_task;
627 
628 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
629 	if (accel_task == NULL) {
630 		return -ENOMEM;
631 	}
632 
633 	accel_task->output_size = output_size;
634 	accel_task->s.iovs = src_iovs;
635 	accel_task->s.iovcnt = src_iovcnt;
636 	accel_task->d.iovs = dst_iovs;
637 	accel_task->d.iovcnt = dst_iovcnt;
638 	accel_task->flags = flags;
639 	accel_task->op_code = ACCEL_OPC_DECOMPRESS;
640 	accel_task->src_domain = NULL;
641 	accel_task->dst_domain = NULL;
642 	accel_task->step_cb_fn = NULL;
643 
644 	return accel_submit_task(accel_ch, accel_task);
645 }
646 
647 int
648 spdk_accel_submit_encrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
649 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
650 			  struct iovec *src_iovs, uint32_t src_iovcnt,
651 			  uint64_t iv, uint32_t block_size, int flags,
652 			  spdk_accel_completion_cb cb_fn, void *cb_arg)
653 {
654 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
655 	struct spdk_accel_task *accel_task;
656 
657 	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
658 		return -EINVAL;
659 	}
660 
661 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
662 	if (accel_task == NULL) {
663 		return -ENOMEM;
664 	}
665 
666 	accel_task->crypto_key = key;
667 	accel_task->s.iovs = src_iovs;
668 	accel_task->s.iovcnt = src_iovcnt;
669 	accel_task->d.iovs = dst_iovs;
670 	accel_task->d.iovcnt = dst_iovcnt;
671 	accel_task->iv = iv;
672 	accel_task->block_size = block_size;
673 	accel_task->flags = flags;
674 	accel_task->op_code = ACCEL_OPC_ENCRYPT;
675 	accel_task->src_domain = NULL;
676 	accel_task->dst_domain = NULL;
677 	accel_task->step_cb_fn = NULL;
678 
679 	return accel_submit_task(accel_ch, accel_task);
680 }
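
/* Encryption sketch.  The key is assumed to have been created beforehand
 * (e.g. via the accel_crypto_key_create RPC) and looked up by name; when
 * block_size matches the device block size, the starting LBA is a common
 * choice of IV:
 *
 *	struct spdk_accel_crypto_key *key = spdk_accel_crypto_key_get("key0");
 *	int rc = spdk_accel_submit_encrypt(ch, key, dst_iovs, dst_iovcnt,
 *					   src_iovs, src_iovcnt, lba, 512, 0,
 *					   cb_fn, cb_arg);
 */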
681 
682 int
683 spdk_accel_submit_decrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
684 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
685 			  struct iovec *src_iovs, uint32_t src_iovcnt,
686 			  uint64_t iv, uint32_t block_size, int flags,
687 			  spdk_accel_completion_cb cb_fn, void *cb_arg)
688 {
689 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
690 	struct spdk_accel_task *accel_task;
691 
692 	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
693 		return -EINVAL;
694 	}
695 
696 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
697 	if (accel_task == NULL) {
698 		return -ENOMEM;
699 	}
700 
701 	accel_task->crypto_key = key;
702 	accel_task->s.iovs = src_iovs;
703 	accel_task->s.iovcnt = src_iovcnt;
704 	accel_task->d.iovs = dst_iovs;
705 	accel_task->d.iovcnt = dst_iovcnt;
706 	accel_task->iv = iv;
707 	accel_task->block_size = block_size;
708 	accel_task->flags = flags;
709 	accel_task->op_code = ACCEL_OPC_DECRYPT;
710 	accel_task->src_domain = NULL;
711 	accel_task->dst_domain = NULL;
712 	accel_task->step_cb_fn = NULL;
713 
714 	return accel_submit_task(accel_ch, accel_task);
715 }
716 
717 int
718 spdk_accel_submit_xor(struct spdk_io_channel *ch, void *dst, void **sources, uint32_t nsrcs,
719 		      uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
720 {
721 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
722 	struct spdk_accel_task *accel_task;
723 
724 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
725 	if (accel_task == NULL) {
726 		return -ENOMEM;
727 	}
728 
729 	accel_task->nsrcs.srcs = sources;
730 	accel_task->nsrcs.cnt = nsrcs;
731 	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
732 	accel_task->d.iovs[0].iov_base = dst;
733 	accel_task->d.iovs[0].iov_len = nbytes;
734 	accel_task->d.iovcnt = 1;
735 	accel_task->op_code = ACCEL_OPC_XOR;
736 	accel_task->src_domain = NULL;
737 	accel_task->dst_domain = NULL;
738 	accel_task->step_cb_fn = NULL;
739 
740 	return accel_submit_task(accel_ch, accel_task);
741 }
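
/* Sketch: XOR is typically used for RAID5-style parity.  Here the parity of
 * two equally sized data buffers is computed into `parity`:
 *
 *	void *sources[2] = { data0, data1 };
 *	int rc = spdk_accel_submit_xor(ch, parity, sources, 2, stripe_len,
 *				       cb_fn, cb_arg);
 */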
742 
743 static inline struct accel_buffer *
744 accel_get_buf(struct accel_io_channel *ch, uint64_t len)
745 {
746 	struct accel_buffer *buf;
747 
748 	buf = TAILQ_FIRST(&ch->buf_pool);
749 	if (spdk_unlikely(buf == NULL)) {
750 		return NULL;
751 	}
752 
753 	TAILQ_REMOVE(&ch->buf_pool, buf, link);
754 	buf->len = len;
755 	buf->buf = NULL;
756 	buf->seq = NULL;
757 	buf->cb_fn = NULL;
758 
759 	return buf;
760 }
761 
762 static inline void
763 accel_put_buf(struct accel_io_channel *ch, struct accel_buffer *buf)
764 {
765 	if (buf->buf != NULL) {
766 		spdk_iobuf_put(&ch->iobuf, buf->buf, buf->len);
767 	}
768 
769 	TAILQ_INSERT_HEAD(&ch->buf_pool, buf, link);
770 }
771 
772 static inline struct spdk_accel_sequence *
773 accel_sequence_get(struct accel_io_channel *ch)
774 {
775 	struct spdk_accel_sequence *seq;
776 
777 	seq = TAILQ_FIRST(&ch->seq_pool);
778 	if (seq == NULL) {
779 		return NULL;
780 	}
781 
782 	TAILQ_REMOVE(&ch->seq_pool, seq, link);
783 
784 	TAILQ_INIT(&seq->tasks);
785 	TAILQ_INIT(&seq->completed);
786 	TAILQ_INIT(&seq->bounce_bufs);
787 
788 	seq->ch = ch;
789 	seq->status = 0;
790 	seq->state = ACCEL_SEQUENCE_STATE_INIT;
791 	seq->in_process_sequence = false;
792 
793 	return seq;
794 }
795 
796 static inline void
797 accel_sequence_put(struct spdk_accel_sequence *seq)
798 {
799 	struct accel_io_channel *ch = seq->ch;
800 	struct accel_buffer *buf;
801 
802 	while (!TAILQ_EMPTY(&seq->bounce_bufs)) {
803 		buf = TAILQ_FIRST(&seq->bounce_bufs);
804 		TAILQ_REMOVE(&seq->bounce_bufs, buf, link);
805 		accel_put_buf(seq->ch, buf);
806 	}
807 
808 	assert(TAILQ_EMPTY(&seq->tasks));
809 	assert(TAILQ_EMPTY(&seq->completed));
810 	seq->ch = NULL;
811 
812 	TAILQ_INSERT_HEAD(&ch->seq_pool, seq, link);
813 }
814 
815 static void accel_sequence_task_cb(void *cb_arg, int status);
816 
817 static inline struct spdk_accel_task *
818 accel_sequence_get_task(struct accel_io_channel *ch, struct spdk_accel_sequence *seq,
819 			spdk_accel_step_cb cb_fn, void *cb_arg)
820 {
821 	struct spdk_accel_task *task;
822 
823 	task = _get_task(ch, accel_sequence_task_cb, seq);
824 	if (task == NULL) {
825 		return task;
826 	}
827 
828 	task->step_cb_fn = cb_fn;
829 	task->step_cb_arg = cb_arg;
830 
831 	return task;
832 }
833 
834 int
835 spdk_accel_append_copy(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
836 		       struct iovec *dst_iovs, uint32_t dst_iovcnt,
837 		       struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
838 		       struct iovec *src_iovs, uint32_t src_iovcnt,
839 		       struct spdk_memory_domain *src_domain, void *src_domain_ctx,
840 		       int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
841 {
842 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
843 	struct spdk_accel_task *task;
844 	struct spdk_accel_sequence *seq = *pseq;
845 
846 	if (seq == NULL) {
847 		seq = accel_sequence_get(accel_ch);
848 		if (spdk_unlikely(seq == NULL)) {
849 			return -ENOMEM;
850 		}
851 	}
852 
853 	assert(seq->ch == accel_ch);
854 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
855 	if (spdk_unlikely(task == NULL)) {
856 		if (*pseq == NULL) {
857 			accel_sequence_put(seq);
858 		}
859 
860 		return -ENOMEM;
861 	}
862 
863 	task->dst_domain = dst_domain;
864 	task->dst_domain_ctx = dst_domain_ctx;
865 	task->d.iovs = dst_iovs;
866 	task->d.iovcnt = dst_iovcnt;
867 	task->src_domain = src_domain;
868 	task->src_domain_ctx = src_domain_ctx;
869 	task->s.iovs = src_iovs;
870 	task->s.iovcnt = src_iovcnt;
871 	task->flags = flags;
872 	task->op_code = ACCEL_OPC_COPY;
873 
874 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
875 	*pseq = seq;
876 
877 	return 0;
878 }
879 
880 int
881 spdk_accel_append_fill(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
882 		       void *buf, uint64_t len,
883 		       struct spdk_memory_domain *domain, void *domain_ctx, uint8_t pattern,
884 		       int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
885 {
886 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
887 	struct spdk_accel_task *task;
888 	struct spdk_accel_sequence *seq = *pseq;
889 
890 	if (seq == NULL) {
891 		seq = accel_sequence_get(accel_ch);
892 		if (spdk_unlikely(seq == NULL)) {
893 			return -ENOMEM;
894 		}
895 	}
896 
897 	assert(seq->ch == accel_ch);
898 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
899 	if (spdk_unlikely(task == NULL)) {
900 		if (*pseq == NULL) {
901 			accel_sequence_put(seq);
902 		}
903 
904 		return -ENOMEM;
905 	}
906 
907 	memset(&task->fill_pattern, pattern, sizeof(uint64_t));
908 
909 	task->d.iovs = &task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
910 	task->d.iovs[0].iov_base = buf;
911 	task->d.iovs[0].iov_len = len;
912 	task->d.iovcnt = 1;
913 	task->src_domain = NULL;
914 	task->dst_domain = domain;
915 	task->dst_domain_ctx = domain_ctx;
916 	task->flags = flags;
917 	task->op_code = ACCEL_OPC_FILL;
918 
919 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
920 	*pseq = seq;
921 
922 	return 0;
923 }
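
/* Sequence sketch: build fill -> copy and then execute it.  Nothing is
 * submitted until spdk_accel_sequence_finish() (defined near the end of this
 * file) is called; with NULL memory domains the buffers are plain local
 * memory.  Note that accel_sequence_merge_tasks() may elide the copy
 * entirely by pointing the fill directly at `out`:
 *
 *	struct spdk_accel_sequence *seq = NULL;
 *	struct iovec dst = { .iov_base = out, .iov_len = len };
 *	struct iovec src = { .iov_base = tmp, .iov_len = len };
 *	int rc;
 *
 *	rc = spdk_accel_append_fill(&seq, ch, tmp, len, NULL, NULL, 0xa5, 0,
 *				    NULL, NULL);
 *	rc = spdk_accel_append_copy(&seq, ch, &dst, 1, NULL, NULL, &src, 1,
 *				    NULL, NULL, 0, NULL, NULL);
 *	spdk_accel_sequence_finish(seq, seq_done_cb, ctx);
 */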
924 
925 int
926 spdk_accel_append_decompress(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
927 			     struct iovec *dst_iovs, size_t dst_iovcnt,
928 			     struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
929 			     struct iovec *src_iovs, size_t src_iovcnt,
930 			     struct spdk_memory_domain *src_domain, void *src_domain_ctx,
931 			     int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
932 {
933 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
934 	struct spdk_accel_task *task;
935 	struct spdk_accel_sequence *seq = *pseq;
936 
937 	if (seq == NULL) {
938 		seq = accel_sequence_get(accel_ch);
939 		if (spdk_unlikely(seq == NULL)) {
940 			return -ENOMEM;
941 		}
942 	}
943 
944 	assert(seq->ch == accel_ch);
945 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
946 	if (spdk_unlikely(task == NULL)) {
947 		if (*pseq == NULL) {
948 			accel_sequence_put(seq);
949 		}
950 
951 		return -ENOMEM;
952 	}
953 
954 	/* TODO: support output_size for chaining */
955 	task->output_size = NULL;
956 	task->dst_domain = dst_domain;
957 	task->dst_domain_ctx = dst_domain_ctx;
958 	task->d.iovs = dst_iovs;
959 	task->d.iovcnt = dst_iovcnt;
960 	task->src_domain = src_domain;
961 	task->src_domain_ctx = src_domain_ctx;
962 	task->s.iovs = src_iovs;
963 	task->s.iovcnt = src_iovcnt;
964 	task->flags = flags;
965 	task->op_code = ACCEL_OPC_DECOMPRESS;
966 
967 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
968 	*pseq = seq;
969 
970 	return 0;
971 }
972 
973 int
974 spdk_accel_append_encrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
975 			  struct spdk_accel_crypto_key *key,
976 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
977 			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
978 			  struct iovec *src_iovs, uint32_t src_iovcnt,
979 			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
980 			  uint64_t iv, uint32_t block_size, int flags,
981 			  spdk_accel_step_cb cb_fn, void *cb_arg)
982 {
983 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
984 	struct spdk_accel_task *task;
985 	struct spdk_accel_sequence *seq = *pseq;
986 
987 	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key ||
988 			  !block_size)) {
989 		return -EINVAL;
990 	}
991 
992 	if (seq == NULL) {
993 		seq = accel_sequence_get(accel_ch);
994 		if (spdk_unlikely(seq == NULL)) {
995 			return -ENOMEM;
996 		}
997 	}
998 
999 	assert(seq->ch == accel_ch);
1000 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1001 	if (spdk_unlikely(task == NULL)) {
1002 		if (*pseq == NULL) {
1003 			accel_sequence_put(seq);
1004 		}
1005 
1006 		return -ENOMEM;
1007 	}
1008 
1009 	task->crypto_key = key;
1010 	task->src_domain = src_domain;
1011 	task->src_domain_ctx = src_domain_ctx;
1012 	task->s.iovs = src_iovs;
1013 	task->s.iovcnt = src_iovcnt;
1014 	task->dst_domain = dst_domain;
1015 	task->dst_domain_ctx = dst_domain_ctx;
1016 	task->d.iovs = dst_iovs;
1017 	task->d.iovcnt = dst_iovcnt;
1018 	task->iv = iv;
1019 	task->block_size = block_size;
1020 	task->flags = flags;
1021 	task->op_code = ACCEL_OPC_ENCRYPT;
1022 
1023 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1024 	*pseq = seq;
1025 
1026 	return 0;
1027 }
1028 
1029 int
1030 spdk_accel_append_decrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1031 			  struct spdk_accel_crypto_key *key,
1032 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
1033 			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1034 			  struct iovec *src_iovs, uint32_t src_iovcnt,
1035 			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1036 			  uint64_t iv, uint32_t block_size, int flags,
1037 			  spdk_accel_step_cb cb_fn, void *cb_arg)
1038 {
1039 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1040 	struct spdk_accel_task *task;
1041 	struct spdk_accel_sequence *seq = *pseq;
1042 
1043 	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key ||
1044 			  !block_size)) {
1045 		return -EINVAL;
1046 	}
1047 
1048 	if (seq == NULL) {
1049 		seq = accel_sequence_get(accel_ch);
1050 		if (spdk_unlikely(seq == NULL)) {
1051 			return -ENOMEM;
1052 		}
1053 	}
1054 
1055 	assert(seq->ch == accel_ch);
1056 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1057 	if (spdk_unlikely(task == NULL)) {
1058 		if (*pseq == NULL) {
1059 			accel_sequence_put(seq);
1060 		}
1061 
1062 		return -ENOMEM;
1063 	}
1064 
1065 	task->crypto_key = key;
1066 	task->src_domain = src_domain;
1067 	task->src_domain_ctx = src_domain_ctx;
1068 	task->s.iovs = src_iovs;
1069 	task->s.iovcnt = src_iovcnt;
1070 	task->dst_domain = dst_domain;
1071 	task->dst_domain_ctx = dst_domain_ctx;
1072 	task->d.iovs = dst_iovs;
1073 	task->d.iovcnt = dst_iovcnt;
1074 	task->iv = iv;
1075 	task->block_size = block_size;
1076 	task->flags = flags;
1077 	task->op_code = ACCEL_OPC_DECRYPT;
1078 
1079 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1080 	*pseq = seq;
1081 
1082 	return 0;
1083 }
1084 
1085 int
1086 spdk_accel_get_buf(struct spdk_io_channel *ch, uint64_t len, void **buf,
1087 		   struct spdk_memory_domain **domain, void **domain_ctx)
1088 {
1089 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1090 	struct accel_buffer *accel_buf;
1091 
1092 	accel_buf = accel_get_buf(accel_ch, len);
1093 	if (spdk_unlikely(accel_buf == NULL)) {
1094 		return -ENOMEM;
1095 	}
1096 
1097 	/* We always return the same pointer and identify the buffers through domain_ctx */
1098 	*buf = ACCEL_BUFFER_BASE;
1099 	*domain_ctx = accel_buf;
1100 	*domain = g_accel_domain;
1101 
1102 	return 0;
1103 }
1104 
1105 void
1106 spdk_accel_put_buf(struct spdk_io_channel *ch, void *buf,
1107 		   struct spdk_memory_domain *domain, void *domain_ctx)
1108 {
1109 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1110 	struct accel_buffer *accel_buf = domain_ctx;
1111 
1112 	assert(domain == g_accel_domain);
1113 	assert(buf == ACCEL_BUFFER_BASE);
1114 
1115 	accel_put_buf(accel_ch, accel_buf);
1116 }
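
/* Sketch: a framework-allocated scratch buffer between chained operations.
 * The returned `buf` is a virtual ACCEL_BUFFER_BASE pointer; backing memory
 * is only taken from the iobuf pool once the sequence actually runs:
 *
 *	void *buf, *domain_ctx;
 *	struct spdk_memory_domain *domain;
 *	int rc;
 *
 *	rc = spdk_accel_get_buf(ch, len, &buf, &domain, &domain_ctx);
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *	rc = spdk_accel_append_decompress(&seq, ch, &iov, 1, domain,
 *					  domain_ctx, src_iovs, src_iovcnt,
 *					  NULL, NULL, 0, NULL, NULL);
 *
 * Further steps may consume `iov`; after the sequence completes, the buffer
 * is returned with spdk_accel_put_buf(ch, buf, domain, domain_ctx).
 */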
1117 
1118 static void
1119 accel_sequence_complete_tasks(struct spdk_accel_sequence *seq)
1120 {
1121 	struct spdk_accel_task *task;
1122 	struct accel_io_channel *ch = seq->ch;
1123 	spdk_accel_step_cb cb_fn;
1124 	void *cb_arg;
1125 
1126 	while (!TAILQ_EMPTY(&seq->completed)) {
1127 		task = TAILQ_FIRST(&seq->completed);
1128 		TAILQ_REMOVE(&seq->completed, task, seq_link);
1129 		cb_fn = task->step_cb_fn;
1130 		cb_arg = task->step_cb_arg;
1131 		TAILQ_INSERT_HEAD(&ch->task_pool, task, link);
1132 		if (cb_fn != NULL) {
1133 			cb_fn(cb_arg);
1134 		}
1135 	}
1136 
1137 	while (!TAILQ_EMPTY(&seq->tasks)) {
1138 		task = TAILQ_FIRST(&seq->tasks);
1139 		TAILQ_REMOVE(&seq->tasks, task, seq_link);
1140 		cb_fn = task->step_cb_fn;
1141 		cb_arg = task->step_cb_arg;
1142 		TAILQ_INSERT_HEAD(&ch->task_pool, task, link);
1143 		if (cb_fn != NULL) {
1144 			cb_fn(cb_arg);
1145 		}
1146 	}
1147 }
1148 
1149 static void
1150 accel_sequence_complete(struct spdk_accel_sequence *seq)
1151 {
1152 	SPDK_DEBUGLOG(accel, "Completed sequence: %p with status: %d\n", seq, seq->status);
1153 
1154 	accel_update_stats(seq->ch, sequence_executed);
1155 	if (spdk_unlikely(seq->status != 0)) {
1156 		accel_update_stats(seq->ch, sequence_failed);
1157 	}
1158 
1159 	/* First notify all users that appended operations to this sequence */
1160 	accel_sequence_complete_tasks(seq);
1161 
1162 	/* Then notify the user that finished the sequence */
1163 	seq->cb_fn(seq->cb_arg, seq->status);
1164 
1165 	accel_sequence_put(seq);
1166 }
1167 
1168 static void
1169 accel_update_virt_iov(struct iovec *diov, struct iovec *siov, struct accel_buffer *accel_buf)
1170 {
1171 	uintptr_t offset;
1172 
1173 	offset = (uintptr_t)siov->iov_base & ACCEL_BUFFER_OFFSET_MASK;
1174 	assert(offset < accel_buf->len);
1175 
1176 	diov->iov_base = (char *)accel_buf->buf + offset;
1177 	diov->iov_len = siov->iov_len;
1178 }
1179 
1180 static void
1181 accel_sequence_set_virtbuf(struct spdk_accel_sequence *seq, struct accel_buffer *buf)
1182 {
1183 	struct spdk_accel_task *task;
1184 	struct iovec *iov;
1185 
1186 	/* Now that we've allocated the actual data buffer for this accel_buffer, update all tasks
1187 	 * in a sequence that were using it.
1188 	 */
1189 	TAILQ_FOREACH(task, &seq->tasks, seq_link) {
1190 		if (task->src_domain == g_accel_domain && task->src_domain_ctx == buf) {
1191 			iov = &task->aux_iovs[SPDK_ACCEL_AXU_IOV_VIRT_SRC];
1192 			assert(task->s.iovcnt == 1);
1193 			accel_update_virt_iov(iov, &task->s.iovs[0], buf);
1194 			task->src_domain = NULL;
1195 			task->s.iovs = iov;
1196 		}
1197 		if (task->dst_domain == g_accel_domain && task->dst_domain_ctx == buf) {
1198 			iov = &task->aux_iovs[SPDK_ACCEL_AXU_IOV_VIRT_DST];
1199 			assert(task->d.iovcnt == 1);
1200 			accel_update_virt_iov(iov, &task->d.iovs[0], buf);
1201 			task->dst_domain = NULL;
1202 			task->d.iovs = iov;
1203 		}
1204 	}
1205 }
1206 
1207 static void accel_process_sequence(struct spdk_accel_sequence *seq);
1208 
1209 static void
1210 accel_iobuf_get_virtbuf_cb(struct spdk_iobuf_entry *entry, void *buf)
1211 {
1212 	struct accel_buffer *accel_buf;
1213 
1214 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1215 
1216 	assert(accel_buf->seq != NULL);
1217 	assert(accel_buf->buf == NULL);
1218 	accel_buf->buf = buf;
1219 
1220 	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
1221 	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
1222 	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
1223 	accel_process_sequence(accel_buf->seq);
1224 }
1225 
1226 static bool
1227 accel_sequence_alloc_buf(struct spdk_accel_sequence *seq, struct accel_buffer *buf,
1228 			 spdk_iobuf_get_cb cb_fn)
1229 {
1230 	struct accel_io_channel *ch = seq->ch;
1231 
1232 	assert(buf->buf == NULL);
1233 	assert(buf->seq == NULL);
1234 
1235 	buf->seq = seq;
1236 	buf->buf = spdk_iobuf_get(&ch->iobuf, buf->len, &buf->iobuf, cb_fn);
1237 	if (buf->buf == NULL) {
1238 		return false;
1239 	}
1240 
1241 	return true;
1242 }
1243 
1244 static bool
1245 accel_sequence_check_virtbuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1246 {
1247 	/* If a task doesn't have dst/src (e.g. fill, crc32), its dst/src domain should be set to
1248 	 * NULL */
1249 	if (task->src_domain == g_accel_domain) {
1250 		if (!accel_sequence_alloc_buf(seq, task->src_domain_ctx,
1251 					      accel_iobuf_get_virtbuf_cb)) {
1252 			return false;
1253 		}
1254 
1255 		accel_sequence_set_virtbuf(seq, task->src_domain_ctx);
1256 	}
1257 
1258 	if (task->dst_domain == g_accel_domain) {
1259 		if (!accel_sequence_alloc_buf(seq, task->dst_domain_ctx,
1260 					      accel_iobuf_get_virtbuf_cb)) {
1261 			return false;
1262 		}
1263 
1264 		accel_sequence_set_virtbuf(seq, task->dst_domain_ctx);
1265 	}
1266 
1267 	return true;
1268 }
1269 
1270 static void
1271 accel_sequence_get_buf_cb(struct spdk_iobuf_entry *entry, void *buf)
1272 {
1273 	struct accel_buffer *accel_buf;
1274 
1275 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1276 
1277 	assert(accel_buf->seq != NULL);
1278 	assert(accel_buf->buf == NULL);
1279 	accel_buf->buf = buf;
1280 
1281 	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
1282 	accel_buf->cb_fn(accel_buf->seq, accel_buf->cb_ctx);
1283 }
1284 
1285 bool
1286 spdk_accel_alloc_sequence_buf(struct spdk_accel_sequence *seq, void *buf,
1287 			      struct spdk_memory_domain *domain, void *domain_ctx,
1288 			      spdk_accel_sequence_get_buf_cb cb_fn, void *cb_ctx)
1289 {
1290 	struct accel_buffer *accel_buf = domain_ctx;
1291 
1292 	assert(domain == g_accel_domain);
1293 	accel_buf->cb_fn = cb_fn;
1294 	accel_buf->cb_ctx = cb_ctx;
1295 
1296 	if (!accel_sequence_alloc_buf(seq, accel_buf, accel_sequence_get_buf_cb)) {
1297 		return false;
1298 	}
1299 
1300 	accel_sequence_set_virtbuf(seq, accel_buf);
1301 
1302 	return true;
1303 }
1304 
1305 struct spdk_accel_task *
1306 spdk_accel_sequence_first_task(struct spdk_accel_sequence *seq)
1307 {
1308 	return TAILQ_FIRST(&seq->tasks);
1309 }
1310 
1311 struct spdk_accel_task *
1312 spdk_accel_sequence_next_task(struct spdk_accel_task *task)
1313 {
1314 	return TAILQ_NEXT(task, seq_link);
1315 }
1316 
1317 static inline uint64_t
1318 accel_get_iovlen(struct iovec *iovs, uint32_t iovcnt)
1319 {
1320 	uint64_t result = 0;
1321 	uint32_t i;
1322 
1323 	for (i = 0; i < iovcnt; ++i) {
1324 		result += iovs[i].iov_len;
1325 	}
1326 
1327 	return result;
1328 }
1329 
1330 static inline void
1331 accel_set_bounce_buffer(struct spdk_accel_bounce_buffer *bounce, struct iovec **iovs,
1332 			uint32_t *iovcnt, struct spdk_memory_domain **domain, void **domain_ctx,
1333 			struct accel_buffer *buf)
1334 {
1335 	bounce->orig_iovs = *iovs;
1336 	bounce->orig_iovcnt = *iovcnt;
1337 	bounce->orig_domain = *domain;
1338 	bounce->orig_domain_ctx = *domain_ctx;
1339 	bounce->iov.iov_base = buf->buf;
1340 	bounce->iov.iov_len = buf->len;
1341 
1342 	*iovs = &bounce->iov;
1343 	*iovcnt = 1;
1344 	*domain = NULL;
1345 }
1346 
1347 static void
1348 accel_iobuf_get_src_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
1349 {
1350 	struct spdk_accel_task *task;
1351 	struct accel_buffer *accel_buf;
1352 
1353 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1354 	assert(accel_buf->buf == NULL);
1355 	accel_buf->buf = buf;
1356 
1357 	task = TAILQ_FIRST(&accel_buf->seq->tasks);
1358 	assert(task != NULL);
1359 
1360 	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
1361 	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
1362 	accel_set_bounce_buffer(&task->bounce.s, &task->s.iovs, &task->s.iovcnt, &task->src_domain,
1363 				&task->src_domain_ctx, accel_buf);
1364 	accel_process_sequence(accel_buf->seq);
1365 }
1366 
1367 static void
1368 accel_iobuf_get_dst_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
1369 {
1370 	struct spdk_accel_task *task;
1371 	struct accel_buffer *accel_buf;
1372 
1373 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1374 	assert(accel_buf->buf == NULL);
1375 	accel_buf->buf = buf;
1376 
1377 	task = TAILQ_FIRST(&accel_buf->seq->tasks);
1378 	assert(task != NULL);
1379 
1380 	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
1381 	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
1382 	accel_set_bounce_buffer(&task->bounce.d, &task->d.iovs, &task->d.iovcnt, &task->dst_domain,
1383 				&task->dst_domain_ctx, accel_buf);
1384 	accel_process_sequence(accel_buf->seq);
1385 }
1386 
1387 static int
1388 accel_sequence_check_bouncebuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1389 {
1390 	struct accel_buffer *buf;
1391 
1392 	if (task->src_domain != NULL) {
1393 		/* By the time we're here, accel buffers should have been allocated */
1394 		assert(task->src_domain != g_accel_domain);
1395 
1396 		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->s.iovs, task->s.iovcnt));
1397 		if (buf == NULL) {
1398 			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
1399 			return -ENOMEM;
1400 		}
1401 
1402 		TAILQ_INSERT_TAIL(&seq->bounce_bufs, buf, link);
1403 		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_src_bounce_cb)) {
1404 			return -EAGAIN;
1405 		}
1406 
1407 		accel_set_bounce_buffer(&task->bounce.s, &task->s.iovs, &task->s.iovcnt,
1408 					&task->src_domain, &task->src_domain_ctx, buf);
1409 	}
1410 
1411 	if (task->dst_domain != NULL) {
1412 		/* By the time we're here, accel buffers should have been allocated */
1413 		assert(task->dst_domain != g_accel_domain);
1414 
1415 		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->d.iovs, task->d.iovcnt));
1416 		if (buf == NULL) {
1417 			/* The src buffer will be released when a sequence is completed */
1418 			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
1419 			return -ENOMEM;
1420 		}
1421 
1422 		TAILQ_INSERT_TAIL(&seq->bounce_bufs, buf, link);
1423 		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_dst_bounce_cb)) {
1424 			return -EAGAIN;
1425 		}
1426 
1427 		accel_set_bounce_buffer(&task->bounce.d, &task->d.iovs, &task->d.iovcnt,
1428 					&task->dst_domain, &task->dst_domain_ctx, buf);
1429 	}
1430 
1431 	return 0;
1432 }
1433 
1434 static void
1435 accel_task_pull_data_cb(void *ctx, int status)
1436 {
1437 	struct spdk_accel_sequence *seq = ctx;
1438 
1439 	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
1440 	if (spdk_likely(status == 0)) {
1441 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
1442 	} else {
1443 		accel_sequence_set_fail(seq, status);
1444 	}
1445 
1446 	accel_process_sequence(seq);
1447 }
1448 
1449 static void
1450 accel_task_pull_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1451 {
1452 	int rc;
1453 
1454 	assert(task->bounce.s.orig_iovs != NULL);
1455 	assert(task->bounce.s.orig_domain != NULL);
1456 	assert(task->bounce.s.orig_domain != g_accel_domain);
1457 	assert(!g_modules_opc[task->op_code].supports_memory_domains);
1458 
1459 	rc = spdk_memory_domain_pull_data(task->bounce.s.orig_domain,
1460 					  task->bounce.s.orig_domain_ctx,
1461 					  task->bounce.s.orig_iovs, task->bounce.s.orig_iovcnt,
1462 					  task->s.iovs, task->s.iovcnt,
1463 					  accel_task_pull_data_cb, seq);
1464 	if (spdk_unlikely(rc != 0)) {
1465 		SPDK_ERRLOG("Failed to pull data from memory domain: %s, rc: %d\n",
1466 			    spdk_memory_domain_get_dma_device_id(task->bounce.s.orig_domain), rc);
1467 		accel_sequence_set_fail(seq, rc);
1468 	}
1469 }
1470 
1471 static void
1472 accel_task_push_data_cb(void *ctx, int status)
1473 {
1474 	struct spdk_accel_sequence *seq = ctx;
1475 
1476 	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
1477 	if (spdk_likely(status == 0)) {
1478 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
1479 	} else {
1480 		accel_sequence_set_fail(seq, status);
1481 	}
1482 
1483 	accel_process_sequence(seq);
1484 }
1485 
1486 static void
1487 accel_task_push_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1488 {
1489 	int rc;
1490 
1491 	assert(task->bounce.d.orig_iovs != NULL);
1492 	assert(task->bounce.d.orig_domain != NULL);
1493 	assert(task->bounce.d.orig_domain != g_accel_domain);
1494 	assert(!g_modules_opc[task->op_code].supports_memory_domains);
1495 
1496 	rc = spdk_memory_domain_push_data(task->bounce.d.orig_domain,
1497 					  task->bounce.d.orig_domain_ctx,
1498 					  task->bounce.d.orig_iovs, task->bounce.d.orig_iovcnt,
1499 					  task->d.iovs, task->d.iovcnt,
1500 					  accel_task_push_data_cb, seq);
1501 	if (spdk_unlikely(rc != 0)) {
1502 		SPDK_ERRLOG("Failed to push data to memory domain: %s, rc: %d\n",
1503 			    spdk_memory_domain_get_dma_device_id(task->bounce.d.orig_domain), rc);
1504 		accel_sequence_set_fail(seq, rc);
1505 	}
1506 }
1507 
1508 static void
1509 accel_process_sequence(struct spdk_accel_sequence *seq)
1510 {
1511 	struct accel_io_channel *accel_ch = seq->ch;
1512 	struct spdk_accel_task *task;
1513 	enum accel_sequence_state state;
1514 	int rc;
1515 
1516 	/* Prevent recursive calls to this function */
1517 	if (spdk_unlikely(seq->in_process_sequence)) {
1518 		return;
1519 	}
1520 	seq->in_process_sequence = true;
1521 
1522 	task = TAILQ_FIRST(&seq->tasks);
1523 	assert(task != NULL);
1524 
1525 	do {
1526 		state = seq->state;
1527 		switch (state) {
1528 		case ACCEL_SEQUENCE_STATE_INIT:
1529 			if (g_accel_driver != NULL) {
1530 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_EXEC);
1531 				break;
1532 			}
1533 		/* Fall through */
1534 		case ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF:
1535 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
1536 			if (!accel_sequence_check_virtbuf(seq, task)) {
1537 				/* We couldn't allocate a buffer, wait until one is available */
1538 				break;
1539 			}
1540 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
1541 		/* Fall through */
1542 		case ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF:
1543 			/* If a module supports memory domains, we don't need to allocate bounce
1544 			 * buffers */
1545 			if (g_modules_opc[task->op_code].supports_memory_domains) {
1546 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
1547 				break;
1548 			}
1549 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
1550 			rc = accel_sequence_check_bouncebuf(seq, task);
1551 			if (rc != 0) {
1552 				/* We couldn't allocate a buffer, wait until one is available */
1553 				if (rc == -EAGAIN) {
1554 					break;
1555 				}
1556 				accel_sequence_set_fail(seq, rc);
1557 				break;
1558 			}
1559 			if (task->bounce.s.orig_iovs != NULL) {
1560 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PULL_DATA);
1561 				break;
1562 			}
1563 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
1564 		/* Fall through */
1565 		case ACCEL_SEQUENCE_STATE_EXEC_TASK:
1566 			SPDK_DEBUGLOG(accel, "Executing %s operation, sequence: %p\n",
1567 				      g_opcode_strings[task->op_code], seq);
1568 
1569 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_TASK);
1570 			rc = accel_submit_task(accel_ch, task);
1571 			if (spdk_unlikely(rc != 0)) {
1572 				SPDK_ERRLOG("Failed to submit %s operation, sequence: %p\n",
1573 					    g_opcode_strings[task->op_code], seq);
1574 				accel_sequence_set_fail(seq, rc);
1575 			}
1576 			break;
1577 		case ACCEL_SEQUENCE_STATE_PULL_DATA:
1578 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
1579 			accel_task_pull_data(seq, task);
1580 			break;
1581 		case ACCEL_SEQUENCE_STATE_COMPLETE_TASK:
1582 			if (task->bounce.d.orig_iovs != NULL) {
1583 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PUSH_DATA);
1584 				break;
1585 			}
1586 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
1587 			break;
1588 		case ACCEL_SEQUENCE_STATE_PUSH_DATA:
1589 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
1590 			accel_task_push_data(seq, task);
1591 			break;
1592 		case ACCEL_SEQUENCE_STATE_NEXT_TASK:
1593 			TAILQ_REMOVE(&seq->tasks, task, seq_link);
1594 			TAILQ_INSERT_TAIL(&seq->completed, task, seq_link);
1595 			/* Check if there are any remaining tasks */
1596 			task = TAILQ_FIRST(&seq->tasks);
1597 			if (task == NULL) {
1598 				/* Immediately return here to make sure we don't touch the sequence
1599 				 * after it's completed */
1600 				accel_sequence_complete(seq);
1601 				return;
1602 			}
1603 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_INIT);
1604 			break;
1605 		case ACCEL_SEQUENCE_STATE_DRIVER_EXEC:
1606 			assert(!TAILQ_EMPTY(&seq->tasks));
1607 
1608 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK);
1609 			rc = g_accel_driver->execute_sequence(seq);
1610 			if (spdk_unlikely(rc != 0)) {
1611 				SPDK_ERRLOG("Failed to execute sequence: %p using driver: %s\n",
1612 					    seq, g_accel_driver->name);
1613 				accel_sequence_set_fail(seq, rc);
1614 			}
1615 			break;
1616 		case ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE:
1617 			task = TAILQ_FIRST(&seq->tasks);
1618 			if (task == NULL) {
1619 				/* Immediately return here to make sure we don't touch the sequence
1620 				 * after it's completed */
1621 				accel_sequence_complete(seq);
1622 				return;
1623 			}
1624 			/* We don't want to execute the next task through the driver, so we
1625 			 * explicitly omit the INIT state here */
1626 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
1627 			break;
1628 		case ACCEL_SEQUENCE_STATE_ERROR:
1629 			/* Immediately return here to make sure we don't touch the sequence
1630 			 * after it's completed */
1631 			assert(seq->status != 0);
1632 			accel_sequence_complete(seq);
1633 			return;
1634 		case ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF:
1635 		case ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF:
1636 		case ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA:
1637 		case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
1638 		case ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA:
1639 		case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK:
1640 			break;
1641 		default:
1642 			assert(0 && "bad state");
1643 			break;
1644 		}
1645 	} while (seq->state != state);
1646 
1647 	seq->in_process_sequence = false;
1648 }
1649 
1650 static void
1651 accel_sequence_task_cb(void *cb_arg, int status)
1652 {
1653 	struct spdk_accel_sequence *seq = cb_arg;
1654 	struct spdk_accel_task *task = TAILQ_FIRST(&seq->tasks);
1655 	struct accel_io_channel *accel_ch = seq->ch;
1656 
1657 	/* spdk_accel_task_complete() puts the task back into the task pool, but we don't want
1658 	 * that when the task is part of a sequence.  Removing the task from the pool here is
1659 	 * the easiest way to prevent this, even though it is a bit hacky.
1660 	 */
1661 	assert(task != NULL);
1662 	TAILQ_REMOVE(&accel_ch->task_pool, task, link);
1663 
1664 	switch (seq->state) {
1665 	case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
1666 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_COMPLETE_TASK);
1667 		if (spdk_unlikely(status != 0)) {
1668 			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p\n",
1669 				    g_opcode_strings[task->op_code], seq);
1670 			accel_sequence_set_fail(seq, status);
1671 		}
1672 
1673 		accel_process_sequence(seq);
1674 		break;
1675 	case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK:
1676 		assert(g_accel_driver != NULL);
1677 		/* Immediately remove the task from the outstanding list to make sure the next call
1678 		 * to spdk_accel_sequence_first_task() doesn't return it */
1679 		TAILQ_REMOVE(&seq->tasks, task, seq_link);
1680 		TAILQ_INSERT_TAIL(&seq->completed, task, seq_link);
1681 
1682 		if (spdk_unlikely(status != 0)) {
1683 			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p through "
1684 				    "driver: %s\n", g_opcode_strings[task->op_code], seq,
1685 				    g_accel_driver->name);
1686 			/* Update status without using accel_sequence_set_fail() to avoid changing
1687 			 * seq's state to ERROR until driver calls spdk_accel_sequence_continue() */
1688 			seq->status = status;
1689 		}
1690 		break;
1691 	default:
1692 		assert(0 && "bad state");
1693 		break;
1694 	}
1695 }
1696 
1697 void
1698 spdk_accel_sequence_continue(struct spdk_accel_sequence *seq)
1699 {
1700 	assert(g_accel_driver != NULL);
1701 	assert(seq->state == ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK);
1702 
1703 	if (spdk_likely(seq->status == 0)) {
1704 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE);
1705 	} else {
1706 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
1707 	}
1708 
1709 	accel_process_sequence(seq);
1710 }
1711 
1712 static bool
1713 accel_compare_iovs(struct iovec *iova, uint32_t iovacnt, struct iovec *iovb, uint32_t iovbcnt)
1714 {
	/* For now, just do a dumb check that the iovec arrays are exactly the same */
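	/* Note that two iovec arrays describing the same bytes with different
	 * segment boundaries compare as different here, so some merge
	 * opportunities are knowingly missed in favor of simplicity. */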
1716 	if (iovacnt != iovbcnt) {
1717 		return false;
1718 	}
1719 
1720 	return memcmp(iova, iovb, sizeof(*iova) * iovacnt) == 0;
1721 }
1722 
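/*
 * Try to merge a task with the task that follows it, eliding copies where
 * possible.  Two shapes are handled below: a copy followed by a consumer has
 * the copy's source forwarded to the consumer, while a producer followed by a
 * copy has the copy's destination pulled into the producer.  The elided copy
 * is moved to the completed list in either case.
 */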
1723 static void
1724 accel_sequence_merge_tasks(struct spdk_accel_sequence *seq, struct spdk_accel_task *task,
1725 			   struct spdk_accel_task **next_task)
1726 {
1727 	struct spdk_accel_task *next = *next_task;
1728 
1729 	switch (task->op_code) {
1730 	case ACCEL_OPC_COPY:
1731 		/* We only allow changing src of operations that actually have a src, e.g. we never
1732 		 * do it for fill.  Theoretically, it is possible, but we'd have to be careful to
1733 		 * change the src of the operation after fill (which in turn could also be a fill).
		 * So, for the sake of simplicity, skip these types of operations for now.
1735 		 */
1736 		if (next->op_code != ACCEL_OPC_DECOMPRESS &&
1737 		    next->op_code != ACCEL_OPC_COPY &&
1738 		    next->op_code != ACCEL_OPC_ENCRYPT &&
1739 		    next->op_code != ACCEL_OPC_DECRYPT) {
1740 			break;
1741 		}
1742 		if (task->dst_domain != next->src_domain) {
1743 			break;
1744 		}
1745 		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
1746 					next->s.iovs, next->s.iovcnt)) {
1747 			break;
1748 		}
1749 		next->s.iovs = task->s.iovs;
1750 		next->s.iovcnt = task->s.iovcnt;
1751 		next->src_domain = task->src_domain;
1752 		TAILQ_REMOVE(&seq->tasks, task, seq_link);
1753 		TAILQ_INSERT_TAIL(&seq->completed, task, seq_link);
1754 		break;
1755 	case ACCEL_OPC_DECOMPRESS:
1756 	case ACCEL_OPC_FILL:
1757 	case ACCEL_OPC_ENCRYPT:
1758 	case ACCEL_OPC_DECRYPT:
1759 		/* We can only merge tasks when one of them is a copy */
1760 		if (next->op_code != ACCEL_OPC_COPY) {
1761 			break;
1762 		}
1763 		if (task->dst_domain != next->src_domain) {
1764 			break;
1765 		}
1766 		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
1767 					next->s.iovs, next->s.iovcnt)) {
1768 			break;
1769 		}
1770 		task->d.iovs = next->d.iovs;
1771 		task->d.iovcnt = next->d.iovcnt;
1772 		task->dst_domain = next->dst_domain;
		/* We're removing next_task from the tasks queue, so update the caller's
		 * pointer to keep its TAILQ_FOREACH_SAFE() loop working correctly */
1775 		*next_task = TAILQ_NEXT(next, seq_link);
1776 		TAILQ_REMOVE(&seq->tasks, next, seq_link);
1777 		TAILQ_INSERT_TAIL(&seq->completed, next, seq_link);
1778 		break;
1779 	default:
1780 		assert(0 && "bad opcode");
1781 		break;
1782 	}
1783 }
1784 
1785 int
1786 spdk_accel_sequence_finish(struct spdk_accel_sequence *seq,
1787 			   spdk_accel_completion_cb cb_fn, void *cb_arg)
1788 {
1789 	struct spdk_accel_task *task, *next;
1790 
1791 	/* Try to remove any copy operations if possible */
1792 	TAILQ_FOREACH_SAFE(task, &seq->tasks, seq_link, next) {
1793 		if (next == NULL) {
1794 			break;
1795 		}
1796 		accel_sequence_merge_tasks(seq, task, &next);
1797 	}
1798 
1799 	seq->cb_fn = cb_fn;
1800 	seq->cb_arg = cb_arg;
1801 
1802 	accel_process_sequence(seq);
1803 
1804 	return 0;
1805 }
1806 
1807 void
1808 spdk_accel_sequence_reverse(struct spdk_accel_sequence *seq)
1809 {
1810 	struct accel_sequence_tasks tasks = TAILQ_HEAD_INITIALIZER(tasks);
1811 	struct spdk_accel_task *task;
1812 
1813 	assert(TAILQ_EMPTY(&seq->completed));
1814 	TAILQ_SWAP(&tasks, &seq->tasks, spdk_accel_task, seq_link);
1815 
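	/* Popping each task from the head of the old list and inserting it at the
	 * head of the (now empty) seq->tasks list reverses the task order. */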
1816 	while (!TAILQ_EMPTY(&tasks)) {
1817 		task = TAILQ_FIRST(&tasks);
1818 		TAILQ_REMOVE(&tasks, task, seq_link);
1819 		TAILQ_INSERT_HEAD(&seq->tasks, task, seq_link);
1820 	}
1821 }
1822 
1823 void
1824 spdk_accel_sequence_abort(struct spdk_accel_sequence *seq)
1825 {
1826 	if (seq == NULL) {
1827 		return;
1828 	}
1829 
1830 	accel_sequence_complete_tasks(seq);
1831 	accel_sequence_put(seq);
1832 }
1833 
1834 struct spdk_memory_domain *
1835 spdk_accel_get_memory_domain(void)
1836 {
1837 	return g_accel_domain;
1838 }
1839 
1840 static struct spdk_accel_module_if *
1841 _module_find_by_name(const char *name)
1842 {
1843 	struct spdk_accel_module_if *accel_module = NULL;
1844 
1845 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
1846 		if (strcmp(name, accel_module->name) == 0) {
1847 			break;
1848 		}
1849 	}
1850 
1851 	return accel_module;
1852 }
1853 
1854 static inline struct spdk_accel_crypto_key *
1855 _accel_crypto_key_get(const char *name)
1856 {
1857 	struct spdk_accel_crypto_key *key;
1858 
1859 	assert(spdk_spin_held(&g_keyring_spin));
1860 
1861 	TAILQ_FOREACH(key, &g_keyring, link) {
1862 		if (strcmp(name, key->param.key_name) == 0) {
1863 			return key;
1864 		}
1865 	}
1866 
1867 	return NULL;
1868 }
1869 
1870 static void
1871 accel_crypto_key_free_mem(struct spdk_accel_crypto_key *key)
1872 {
1873 	if (key->param.hex_key) {
1874 		spdk_memset_s(key->param.hex_key, key->key_size * 2, 0, key->key_size * 2);
1875 		free(key->param.hex_key);
1876 	}
1877 	if (key->param.hex_key2) {
1878 		spdk_memset_s(key->param.hex_key2, key->key2_size * 2, 0, key->key2_size * 2);
1879 		free(key->param.hex_key2);
1880 	}
1881 	free(key->param.key_name);
1882 	free(key->param.cipher);
1883 	if (key->key) {
1884 		spdk_memset_s(key->key, key->key_size, 0, key->key_size);
1885 		free(key->key);
1886 	}
1887 	if (key->key2) {
1888 		spdk_memset_s(key->key2, key->key2_size, 0, key->key2_size);
1889 		free(key->key2);
1890 	}
1891 	free(key);
1892 }
1893 
1894 static void
1895 accel_crypto_key_destroy_unsafe(struct spdk_accel_crypto_key *key)
1896 {
1897 	assert(key->module_if);
1898 	assert(key->module_if->crypto_key_deinit);
1899 
1900 	key->module_if->crypto_key_deinit(key);
1901 	accel_crypto_key_free_mem(key);
1902 }
1903 
1904 /*
 * This function mitigates a timing side channel that could be caused by using strcmp().
 * Please refer to the chapter "Mitigating Information Leakage Based on Variable Timing" in
 * the article [1] for more details.
1908  * [1] https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/secure-coding/mitigate-timing-side-channel-crypto-implementation.html
1909  */
1910 static bool
1911 accel_aes_xts_keys_equal(const char *k1, size_t k1_len, const char *k2, size_t k2_len)
1912 {
1913 	size_t i;
1914 	volatile size_t x = k1_len ^ k2_len;
1915 
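	/* Accumulate differences with bitwise operations instead of early-exiting
	 * comparisons, so the run time depends only on the key lengths and not on
	 * where the first mismatching byte occurs. */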
1916 	for (i = 0; ((i < k1_len) & (i < k2_len)); i++) {
1917 		x |= k1[i] ^ k2[i];
1918 	}
1919 
1920 	return x == 0;
1921 }
1922 
1923 int
1924 spdk_accel_crypto_key_create(const struct spdk_accel_crypto_key_create_param *param)
1925 {
1926 	struct spdk_accel_module_if *module;
1927 	struct spdk_accel_crypto_key *key;
1928 	size_t hex_key_size, hex_key2_size;
1929 	int rc;
1930 
1931 	if (!param || !param->hex_key || !param->cipher || !param->key_name) {
1932 		return -EINVAL;
1933 	}
1934 
1935 	if (g_modules_opc[ACCEL_OPC_ENCRYPT].module != g_modules_opc[ACCEL_OPC_DECRYPT].module) {
		/* This should be practically impossible, but check and warn the user just in case */
1937 		SPDK_ERRLOG("Different accel modules are used for encryption and decryption\n");
1938 	}
1939 	module = g_modules_opc[ACCEL_OPC_ENCRYPT].module;
1940 
1941 	if (!module) {
1942 		SPDK_ERRLOG("No accel module found assigned for crypto operation\n");
1943 		return -ENOENT;
1944 	}
1945 	if (!module->crypto_key_init) {
1946 		SPDK_ERRLOG("Accel module \"%s\" doesn't support crypto operations\n", module->name);
1947 		return -ENOTSUP;
1948 	}
1949 
1950 	key = calloc(1, sizeof(*key));
1951 	if (!key) {
1952 		return -ENOMEM;
1953 	}
1954 
1955 	key->param.key_name = strdup(param->key_name);
1956 	if (!key->param.key_name) {
1957 		rc = -ENOMEM;
1958 		goto error;
1959 	}
1960 
1961 	key->param.cipher = strdup(param->cipher);
1962 	if (!key->param.cipher) {
1963 		rc = -ENOMEM;
1964 		goto error;
1965 	}
1966 
1967 	hex_key_size = strnlen(param->hex_key, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
1968 	if (hex_key_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
1969 		SPDK_ERRLOG("key1 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
1970 		rc = -EINVAL;
1971 		goto error;
1972 	}
1973 	key->param.hex_key = strdup(param->hex_key);
1974 	if (!key->param.hex_key) {
1975 		rc = -ENOMEM;
1976 		goto error;
1977 	}
1978 
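	/* Two hex characters encode one binary key byte, hence the halving. */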
1979 	key->key_size = hex_key_size / 2;
1980 	key->key = spdk_unhexlify(key->param.hex_key);
1981 	if (!key->key) {
1982 		SPDK_ERRLOG("Failed to unhexlify key1\n");
1983 		rc = -EINVAL;
1984 		goto error;
1985 	}
1986 
1987 	if (param->hex_key2) {
1988 		hex_key2_size = strnlen(param->hex_key2, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
1989 		if (hex_key2_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
1990 			SPDK_ERRLOG("key2 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
1991 			rc = -EINVAL;
1992 			goto error;
1993 		}
1994 		key->param.hex_key2 = strdup(param->hex_key2);
1995 		if (!key->param.hex_key2) {
1996 			rc = -ENOMEM;
1997 			goto error;
1998 		}
1999 
2000 		key->key2_size = hex_key2_size / 2;
2001 		key->key2 = spdk_unhexlify(key->param.hex_key2);
2002 		if (!key->key2) {
2003 			SPDK_ERRLOG("Failed to unhexlify key2\n");
2004 			rc = -EINVAL;
2005 			goto error;
2006 		}
2007 
2008 		if (accel_aes_xts_keys_equal(key->key, key->key_size, key->key2, key->key2_size)) {
2009 			SPDK_ERRLOG("Identical keys are not secure\n");
2010 			rc = -EINVAL;
2011 			goto error;
2012 		}
2013 	}
2014 
2015 	key->module_if = module;
2016 
2017 	spdk_spin_lock(&g_keyring_spin);
2018 	if (_accel_crypto_key_get(param->key_name)) {
2019 		rc = -EEXIST;
2020 	} else {
2021 		rc = module->crypto_key_init(key);
2022 		if (!rc) {
2023 			TAILQ_INSERT_TAIL(&g_keyring, key, link);
2024 		}
2025 	}
2026 	spdk_spin_unlock(&g_keyring_spin);
2027 
2028 	if (rc) {
2029 		goto error;
2030 	}
2031 
2032 	return 0;
2033 
2034 error:
2035 	accel_crypto_key_free_mem(key);
2036 	return rc;
2037 }
2038 
2039 int
2040 spdk_accel_crypto_key_destroy(struct spdk_accel_crypto_key *key)
2041 {
2042 	if (!key || !key->module_if) {
2043 		return -EINVAL;
2044 	}
2045 
2046 	spdk_spin_lock(&g_keyring_spin);
2047 	if (!_accel_crypto_key_get(key->param.key_name)) {
2048 		spdk_spin_unlock(&g_keyring_spin);
2049 		return -ENOENT;
2050 	}
2051 	TAILQ_REMOVE(&g_keyring, key, link);
2052 	spdk_spin_unlock(&g_keyring_spin);
2053 
2054 	accel_crypto_key_destroy_unsafe(key);
2055 
2056 	return 0;
2057 }
2058 
2059 struct spdk_accel_crypto_key *
2060 spdk_accel_crypto_key_get(const char *name)
2061 {
2062 	struct spdk_accel_crypto_key *key;
2063 
2064 	spdk_spin_lock(&g_keyring_spin);
2065 	key = _accel_crypto_key_get(name);
2066 	spdk_spin_unlock(&g_keyring_spin);
2067 
2068 	return key;
2069 }
2070 
/* Helper function called when accel modules register with the framework. */
2072 void
2073 spdk_accel_module_list_add(struct spdk_accel_module_if *accel_module)
2074 {
2075 	if (_module_find_by_name(accel_module->name)) {
2076 		SPDK_NOTICELOG("Accel module %s already registered\n", accel_module->name);
2077 		assert(false);
2078 		return;
2079 	}
2080 
	/* Make sure that the software module is at the head of the list.  This
	 * ensures that all opcodes are assigned to software first and then
	 * updated to HW modules as they are registered.
	 */
2085 	if (strcmp(accel_module->name, "software") == 0) {
2086 		TAILQ_INSERT_HEAD(&spdk_accel_module_list, accel_module, tailq);
2087 	} else {
2088 		TAILQ_INSERT_TAIL(&spdk_accel_module_list, accel_module, tailq);
2089 	}
2090 
2091 	if (accel_module->get_ctx_size && accel_module->get_ctx_size() > g_max_accel_module_size) {
2092 		g_max_accel_module_size = accel_module->get_ctx_size();
2093 	}
2094 }
2095 
2096 /* Framework level channel create callback. */
2097 static int
2098 accel_create_channel(void *io_device, void *ctx_buf)
2099 {
2100 	struct accel_io_channel	*accel_ch = ctx_buf;
2101 	struct spdk_accel_task *accel_task;
2102 	struct spdk_accel_sequence *seq;
2103 	struct accel_buffer *buf;
2104 	uint8_t *task_mem;
2105 	int i = 0, j, rc;
2106 
2107 	accel_ch->task_pool_base = calloc(MAX_TASKS_PER_CHANNEL, g_max_accel_module_size);
2108 	if (accel_ch->task_pool_base == NULL) {
2109 		return -ENOMEM;
2110 	}
2111 
2112 	accel_ch->seq_pool_base = calloc(MAX_TASKS_PER_CHANNEL, sizeof(struct spdk_accel_sequence));
2113 	if (accel_ch->seq_pool_base == NULL) {
2114 		goto err;
2115 	}
2116 
2117 	accel_ch->buf_pool_base = calloc(MAX_TASKS_PER_CHANNEL, sizeof(struct accel_buffer));
2118 	if (accel_ch->buf_pool_base == NULL) {
2119 		goto err;
2120 	}
2121 
2122 	TAILQ_INIT(&accel_ch->task_pool);
2123 	TAILQ_INIT(&accel_ch->seq_pool);
2124 	TAILQ_INIT(&accel_ch->buf_pool);
2125 	task_mem = accel_ch->task_pool_base;
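	/* Tasks are variable-sized (the base task plus the largest module
	 * context), so step through the pool in g_max_accel_module_size
	 * increments rather than indexing a typed array. */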
	for (i = 0; i < MAX_TASKS_PER_CHANNEL; i++) {
2127 		accel_task = (struct spdk_accel_task *)task_mem;
2128 		seq = &accel_ch->seq_pool_base[i];
2129 		buf = &accel_ch->buf_pool_base[i];
2130 		TAILQ_INSERT_TAIL(&accel_ch->task_pool, accel_task, link);
2131 		TAILQ_INSERT_TAIL(&accel_ch->seq_pool, seq, link);
2132 		TAILQ_INSERT_TAIL(&accel_ch->buf_pool, buf, link);
2133 		task_mem += g_max_accel_module_size;
2134 	}
2135 
2136 	/* Assign modules and get IO channels for each */
2137 	for (i = 0; i < ACCEL_OPC_LAST; i++) {
2138 		accel_ch->module_ch[i] = g_modules_opc[i].module->get_io_channel();
2139 		/* This can happen if idxd runs out of channels. */
2140 		if (accel_ch->module_ch[i] == NULL) {
2141 			goto err;
2142 		}
2143 	}
2144 
2145 	rc = spdk_iobuf_channel_init(&accel_ch->iobuf, "accel", g_opts.small_cache_size,
2146 				     g_opts.large_cache_size);
2147 	if (rc != 0) {
2148 		SPDK_ERRLOG("Failed to initialize iobuf accel channel\n");
2149 		goto err;
2150 	}
2151 
2152 	return 0;
2153 err:
2154 	for (j = 0; j < i; j++) {
2155 		spdk_put_io_channel(accel_ch->module_ch[j]);
2156 	}
2157 	free(accel_ch->task_pool_base);
2158 	free(accel_ch->seq_pool_base);
2159 	free(accel_ch->buf_pool_base);
2160 
2161 	return -ENOMEM;
2162 }
2163 
2164 static void
2165 accel_add_stats(struct accel_stats *total, struct accel_stats *stats)
2166 {
2167 	int i;
2168 
2169 	total->sequence_executed += stats->sequence_executed;
2170 	total->sequence_failed += stats->sequence_failed;
2171 	for (i = 0; i < ACCEL_OPC_LAST; ++i) {
2172 		total->operations[i].executed += stats->operations[i].executed;
2173 		total->operations[i].failed += stats->operations[i].failed;
2174 	}
2175 }
2176 
2177 /* Framework level channel destroy callback. */
2178 static void
2179 accel_destroy_channel(void *io_device, void *ctx_buf)
2180 {
2181 	struct accel_io_channel	*accel_ch = ctx_buf;
2182 	int i;
2183 
2184 	spdk_iobuf_channel_fini(&accel_ch->iobuf);
2185 
2186 	for (i = 0; i < ACCEL_OPC_LAST; i++) {
2187 		assert(accel_ch->module_ch[i] != NULL);
2188 		spdk_put_io_channel(accel_ch->module_ch[i]);
2189 		accel_ch->module_ch[i] = NULL;
2190 	}
2191 
2192 	/* Update global stats to make sure channel's stats aren't lost after a channel is gone */
2193 	spdk_spin_lock(&g_stats_lock);
2194 	accel_add_stats(&g_stats, &accel_ch->stats);
2195 	spdk_spin_unlock(&g_stats_lock);
2196 
2197 	free(accel_ch->task_pool_base);
2198 	free(accel_ch->seq_pool_base);
2199 	free(accel_ch->buf_pool_base);
2200 }
2201 
2202 struct spdk_io_channel *
2203 spdk_accel_get_io_channel(void)
2204 {
2205 	return spdk_get_io_channel(&spdk_accel_module_list);
2206 }
2207 
2208 static void
2209 accel_module_initialize(void)
2210 {
2211 	struct spdk_accel_module_if *accel_module;
2212 
2213 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
2214 		accel_module->module_init();
2215 	}
2216 }
2217 
2218 static void
2219 accel_module_init_opcode(enum accel_opcode opcode)
2220 {
2221 	struct accel_module *module = &g_modules_opc[opcode];
2222 	struct spdk_accel_module_if *module_if = module->module;
2223 
2224 	if (module_if->get_memory_domains != NULL) {
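		/* Passing a NULL array asks the module only for the number of
		 * memory domains it supports, without filling anything in. */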
2225 		module->supports_memory_domains = module_if->get_memory_domains(NULL, 0) > 0;
2226 	}
2227 }
2228 
2229 int
2230 spdk_accel_initialize(void)
2231 {
2232 	enum accel_opcode op;
2233 	struct spdk_accel_module_if *accel_module = NULL;
2234 	int rc;
2235 
2236 	rc = spdk_memory_domain_create(&g_accel_domain, SPDK_DMA_DEVICE_TYPE_ACCEL, NULL,
2237 				       "SPDK_ACCEL_DMA_DEVICE");
2238 	if (rc != 0) {
2239 		SPDK_ERRLOG("Failed to create accel memory domain\n");
2240 		return rc;
2241 	}
2242 
2243 	spdk_spin_init(&g_keyring_spin);
2244 	spdk_spin_init(&g_stats_lock);
2245 
2246 	g_modules_started = true;
2247 	accel_module_initialize();
2248 
	/* Create our global priority map of opcodes to modules.  We populate it
	 * starting with the software module (guaranteed to be first on the list)
	 * and then update opcodes with HW modules that have been initialized.
	 * NOTE: all opcodes must be supported by software in the event that no HW
	 * module is initialized to support the operation.
	 */
2255 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
2256 		for (op = 0; op < ACCEL_OPC_LAST; op++) {
2257 			if (accel_module->supports_opcode(op)) {
2258 				g_modules_opc[op].module = accel_module;
2259 				SPDK_DEBUGLOG(accel, "OPC 0x%x now assigned to %s\n", op, accel_module->name);
2260 			}
2261 		}
2262 	}
2263 
	/* Now let's check for overrides and apply any that exist */
2265 	for (op = 0; op < ACCEL_OPC_LAST; op++) {
2266 		if (g_modules_opc_override[op] != NULL) {
2267 			accel_module = _module_find_by_name(g_modules_opc_override[op]);
2268 			if (accel_module == NULL) {
2269 				SPDK_ERRLOG("Invalid module name of %s\n", g_modules_opc_override[op]);
2270 				rc = -EINVAL;
2271 				goto error;
2272 			}
2273 			if (accel_module->supports_opcode(op) == false) {
2274 				SPDK_ERRLOG("Module %s does not support op code %d\n", accel_module->name, op);
2275 				rc = -EINVAL;
2276 				goto error;
2277 			}
2278 			g_modules_opc[op].module = accel_module;
2279 		}
2280 	}
2281 
2282 	if (g_modules_opc[ACCEL_OPC_ENCRYPT].module != g_modules_opc[ACCEL_OPC_DECRYPT].module) {
2283 		SPDK_ERRLOG("Different accel modules are assigned to encrypt and decrypt operations");
2284 		rc = -EINVAL;
2285 		goto error;
2286 	}
2287 
2288 	for (op = 0; op < ACCEL_OPC_LAST; op++) {
2289 		assert(g_modules_opc[op].module != NULL);
2290 		accel_module_init_opcode(op);
2291 	}
2292 
2293 	rc = spdk_iobuf_register_module("accel");
2294 	if (rc != 0) {
2295 		SPDK_ERRLOG("Failed to register accel iobuf module\n");
2296 		goto error;
2297 	}
2298 
2299 	/*
2300 	 * We need a unique identifier for the accel framework, so use the
2301 	 * spdk_accel_module_list address for this purpose.
2302 	 */
2303 	spdk_io_device_register(&spdk_accel_module_list, accel_create_channel, accel_destroy_channel,
2304 				sizeof(struct accel_io_channel), "accel");
2305 
2306 	return 0;
2307 error:
2308 	spdk_memory_domain_destroy(g_accel_domain);
2309 
2310 	return rc;
2311 }
2312 
2313 static void
2314 accel_module_finish_cb(void)
2315 {
2316 	spdk_accel_fini_cb cb_fn = g_fini_cb_fn;
2317 
2318 	spdk_memory_domain_destroy(g_accel_domain);
2319 
2320 	cb_fn(g_fini_cb_arg);
2321 	g_fini_cb_fn = NULL;
2322 	g_fini_cb_arg = NULL;
2323 }
2324 
2325 static void
2326 accel_write_overridden_opc(struct spdk_json_write_ctx *w, const char *opc_str,
2327 			   const char *module_str)
2328 {
2329 	spdk_json_write_object_begin(w);
2330 	spdk_json_write_named_string(w, "method", "accel_assign_opc");
2331 	spdk_json_write_named_object_begin(w, "params");
2332 	spdk_json_write_named_string(w, "opname", opc_str);
2333 	spdk_json_write_named_string(w, "module", module_str);
2334 	spdk_json_write_object_end(w);
2335 	spdk_json_write_object_end(w);
2336 }
2337 
2338 static void
2339 __accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
2340 {
2341 	spdk_json_write_named_string(w, "name", key->param.key_name);
2342 	spdk_json_write_named_string(w, "cipher", key->param.cipher);
2343 	spdk_json_write_named_string(w, "key", key->param.hex_key);
2344 	if (key->param.hex_key2) {
2345 		spdk_json_write_named_string(w, "key2", key->param.hex_key2);
2346 	}
2347 }
2348 
2349 void
2350 _accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
2351 {
2352 	spdk_json_write_object_begin(w);
2353 	__accel_crypto_key_dump_param(w, key);
2354 	spdk_json_write_object_end(w);
2355 }
2356 
2357 static void
2358 _accel_crypto_key_write_config_json(struct spdk_json_write_ctx *w,
2359 				    struct spdk_accel_crypto_key *key)
2360 {
2361 	spdk_json_write_object_begin(w);
2362 	spdk_json_write_named_string(w, "method", "accel_crypto_key_create");
2363 	spdk_json_write_named_object_begin(w, "params");
2364 	__accel_crypto_key_dump_param(w, key);
2365 	spdk_json_write_object_end(w);
2366 	spdk_json_write_object_end(w);
2367 }
2368 
2369 static void
2370 accel_write_options(struct spdk_json_write_ctx *w)
2371 {
2372 	spdk_json_write_object_begin(w);
2373 	spdk_json_write_named_string(w, "method", "accel_set_options");
2374 	spdk_json_write_named_object_begin(w, "params");
2375 	spdk_json_write_named_uint32(w, "small_cache_size", g_opts.small_cache_size);
2376 	spdk_json_write_named_uint32(w, "large_cache_size", g_opts.large_cache_size);
2377 	spdk_json_write_object_end(w);
2378 	spdk_json_write_object_end(w);
2379 }
2380 
2381 static void
2382 _accel_crypto_keys_write_config_json(struct spdk_json_write_ctx *w, bool full_dump)
2383 {
2384 	struct spdk_accel_crypto_key *key;
2385 
2386 	spdk_spin_lock(&g_keyring_spin);
2387 	TAILQ_FOREACH(key, &g_keyring, link) {
2388 		if (full_dump) {
2389 			_accel_crypto_key_write_config_json(w, key);
2390 		} else {
2391 			_accel_crypto_key_dump_param(w, key);
2392 		}
2393 	}
2394 	spdk_spin_unlock(&g_keyring_spin);
2395 }
2396 
2397 void
2398 _accel_crypto_keys_dump_param(struct spdk_json_write_ctx *w)
2399 {
2400 	_accel_crypto_keys_write_config_json(w, false);
2401 }
2402 
2403 void
2404 spdk_accel_write_config_json(struct spdk_json_write_ctx *w)
2405 {
2406 	struct spdk_accel_module_if *accel_module;
2407 	int i;
2408 
2409 	spdk_json_write_array_begin(w);
2410 	accel_write_options(w);
2411 
2412 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
2413 		if (accel_module->write_config_json) {
2414 			accel_module->write_config_json(w);
2415 		}
2416 	}
2417 	for (i = 0; i < ACCEL_OPC_LAST; i++) {
2418 		if (g_modules_opc_override[i]) {
2419 			accel_write_overridden_opc(w, g_opcode_strings[i], g_modules_opc_override[i]);
2420 		}
2421 	}
2422 
2423 	_accel_crypto_keys_write_config_json(w, true);
2424 
2425 	spdk_json_write_array_end(w);
2426 }
2427 
2428 void
2429 spdk_accel_module_finish(void)
2430 {
2431 	if (!g_accel_module) {
2432 		g_accel_module = TAILQ_FIRST(&spdk_accel_module_list);
2433 	} else {
2434 		g_accel_module = TAILQ_NEXT(g_accel_module, tailq);
2435 	}
2436 
	if (!g_accel_module) {
		/* Destroy both framework spinlocks initialized in spdk_accel_initialize() */
		spdk_spin_destroy(&g_keyring_spin);
		spdk_spin_destroy(&g_stats_lock);
		accel_module_finish_cb();
		return;
	}
2442 
2443 	if (g_accel_module->module_fini) {
2444 		spdk_thread_send_msg(spdk_get_thread(), g_accel_module->module_fini, NULL);
2445 	} else {
2446 		spdk_accel_module_finish();
2447 	}
2448 }
2449 
2450 void
2451 spdk_accel_finish(spdk_accel_fini_cb cb_fn, void *cb_arg)
2452 {
2453 	struct spdk_accel_crypto_key *key, *key_tmp;
2454 	enum accel_opcode op;
2455 
2456 	assert(cb_fn != NULL);
2457 
2458 	g_fini_cb_fn = cb_fn;
2459 	g_fini_cb_arg = cb_arg;
2460 
2461 	spdk_spin_lock(&g_keyring_spin);
2462 	TAILQ_FOREACH_SAFE(key, &g_keyring, link, key_tmp) {
2463 		accel_crypto_key_destroy_unsafe(key);
2464 	}
2465 	spdk_spin_unlock(&g_keyring_spin);
2466 
2467 	for (op = 0; op < ACCEL_OPC_LAST; op++) {
2468 		if (g_modules_opc_override[op] != NULL) {
2469 			free(g_modules_opc_override[op]);
2470 			g_modules_opc_override[op] = NULL;
2471 		}
2472 		g_modules_opc[op].module = NULL;
2473 	}
2474 
2475 	spdk_io_device_unregister(&spdk_accel_module_list, NULL);
2476 	spdk_accel_module_finish();
2477 }
2478 
2479 static struct spdk_accel_driver *
2480 accel_find_driver(const char *name)
2481 {
2482 	struct spdk_accel_driver *driver;
2483 
2484 	TAILQ_FOREACH(driver, &g_accel_drivers, tailq) {
2485 		if (strcmp(driver->name, name) == 0) {
2486 			return driver;
2487 		}
2488 	}
2489 
2490 	return NULL;
2491 }
2492 
2493 int
2494 spdk_accel_set_driver(const char *name)
2495 {
2496 	struct spdk_accel_driver *driver;
2497 
2498 	driver = accel_find_driver(name);
2499 	if (driver == NULL) {
2500 		SPDK_ERRLOG("Couldn't find driver named '%s'\n", name);
2501 		return -ENODEV;
2502 	}
2503 
2504 	g_accel_driver = driver;
2505 
2506 	return 0;
2507 }
2508 
2509 void
2510 spdk_accel_driver_register(struct spdk_accel_driver *driver)
2511 {
2512 	if (accel_find_driver(driver->name)) {
2513 		SPDK_ERRLOG("Driver named '%s' has already been registered\n", driver->name);
2514 		assert(0);
2515 		return;
2516 	}
2517 
2518 	TAILQ_INSERT_TAIL(&g_accel_drivers, driver, tailq);
2519 }
2520 
2521 int
2522 spdk_accel_set_opts(const struct spdk_accel_opts *opts)
2523 {
2524 	if (opts->size > sizeof(*opts)) {
2525 		return -EINVAL;
2526 	}
2527 
2528 	memcpy(&g_opts, opts, opts->size);
2529 
2530 	return 0;
2531 }
2532 
2533 void
2534 spdk_accel_get_opts(struct spdk_accel_opts *opts)
2535 {
2536 	size_t size = opts->size;
2537 
2538 	assert(size <= sizeof(*opts));
2539 
2540 	memcpy(opts, &g_opts, spdk_min(sizeof(*opts), size));
2541 	opts->size = size;
2542 }
2543 
2544 struct accel_get_stats_ctx {
2545 	struct accel_stats	stats;
2546 	accel_get_stats_cb	cb_fn;
2547 	void			*cb_arg;
2548 };
2549 
2550 static void
2551 accel_get_channel_stats_done(struct spdk_io_channel_iter *iter, int status)
2552 {
2553 	struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);
2554 
2555 	ctx->cb_fn(&ctx->stats, ctx->cb_arg);
2556 	free(ctx);
2557 }
2558 
2559 static void
2560 accel_get_channel_stats(struct spdk_io_channel_iter *iter)
2561 {
2562 	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(iter);
2563 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
2564 	struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);
2565 
2566 	accel_add_stats(&ctx->stats, &accel_ch->stats);
2567 	spdk_for_each_channel_continue(iter, 0);
2568 }
2569 
2570 int
2571 accel_get_stats(accel_get_stats_cb cb_fn, void *cb_arg)
2572 {
2573 	struct accel_get_stats_ctx *ctx;
2574 
2575 	ctx = calloc(1, sizeof(*ctx));
2576 	if (ctx == NULL) {
2577 		return -ENOMEM;
2578 	}
2579 
2580 	spdk_spin_lock(&g_stats_lock);
2581 	accel_add_stats(&ctx->stats, &g_stats);
2582 	spdk_spin_unlock(&g_stats_lock);
2583 
2584 	ctx->cb_fn = cb_fn;
2585 	ctx->cb_arg = cb_arg;
2586 
2587 	spdk_for_each_channel(&spdk_accel_module_list, accel_get_channel_stats, ctx,
2588 			      accel_get_channel_stats_done);
2589 
2590 	return 0;
2591 }
2592 
2593 SPDK_LOG_REGISTER_COMPONENT(accel)
2594