xref: /spdk/lib/accel/accel.c (revision fa272c9bc68fb30877ce5c2817feaa30418666a6)
/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2020 Intel Corporation.
 *   Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES.
 *   All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk_internal/accel_module.h"

#include "accel_internal.h"

#include "spdk/dma.h"
#include "spdk/env.h"
#include "spdk/likely.h"
#include "spdk/log.h"
#include "spdk/thread.h"
#include "spdk/json.h"
#include "spdk/crc32.h"
#include "spdk/util.h"
#include "spdk/hexlify.h"

/* Accelerator Framework: This file provides a top-level, generic API
 * for the accelerator operations defined here. Modules, such as the
 * one in /module/accel/ioat, supply the implementations, with the
 * exception of the pure software implementation contained later in
 * this file.
 */
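
/*
 * Usage sketch (illustrative only, compiled out — not part of this file): a
 * typical caller submits an operation through the public API below and
 * handles completion in a callback.  It assumes the caller runs on an SPDK
 * thread and obtains a channel via spdk_accel_get_io_channel() (declared in
 * spdk/accel.h); a real application would keep the channel around and
 * release it with spdk_put_io_channel().
 */
#if 0
static void
example_copy_done(void *cb_arg, int status)
{
	if (status != 0) {
		SPDK_ERRLOG("copy failed: %d\n", status);
	}
}

static int
example_submit_copy(void *dst, void *src, uint64_t nbytes)
{
	struct spdk_io_channel *ch = spdk_accel_get_io_channel();

	if (ch == NULL) {
		return -ENOMEM;
	}

	return spdk_accel_submit_copy(ch, dst, src, nbytes, 0,
				      example_copy_done, NULL);
}
#endif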

#define ALIGN_4K			0x1000
#define MAX_TASKS_PER_CHANNEL		0x800
#define ACCEL_SMALL_CACHE_SIZE		0
#define ACCEL_LARGE_CACHE_SIZE		0
/* Set MSB, so we don't return NULL pointers as buffers */
#define ACCEL_BUFFER_BASE		((void *)(1ull << 63))
#define ACCEL_BUFFER_OFFSET_MASK	((uintptr_t)ACCEL_BUFFER_BASE - 1)
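
/*
 * Sketch (illustrative only, compiled out): how the virtual addresses built
 * from ACCEL_BUFFER_BASE are decoded.  A pointer handed out by
 * spdk_accel_get_buf() is ACCEL_BUFFER_BASE plus an offset, so masking with
 * ACCEL_BUFFER_OFFSET_MASK recovers the offset into the real data buffer
 * once it has been allocated (see accel_update_buf() below).
 */
#if 0
static void *
example_resolve_accel_buf(void *virt_addr, void *real_base)
{
	uintptr_t offset = (uintptr_t)virt_addr & ACCEL_BUFFER_OFFSET_MASK;

	return (char *)real_base + offset;
}
#endif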

struct accel_module {
	struct spdk_accel_module_if	*module;
	bool				supports_memory_domains;
};

/* Largest context size for all accel modules */
static size_t g_max_accel_module_size = sizeof(struct spdk_accel_task);

static struct spdk_accel_module_if *g_accel_module = NULL;
static spdk_accel_fini_cb g_fini_cb_fn = NULL;
static void *g_fini_cb_arg = NULL;
static bool g_modules_started = false;
static struct spdk_memory_domain *g_accel_domain;

/* Global list of registered accelerator modules */
static TAILQ_HEAD(, spdk_accel_module_if) spdk_accel_module_list =
	TAILQ_HEAD_INITIALIZER(spdk_accel_module_list);

/* Crypto keyring */
static TAILQ_HEAD(, spdk_accel_crypto_key) g_keyring = TAILQ_HEAD_INITIALIZER(g_keyring);
static struct spdk_spinlock g_keyring_spin;

/* Global array mapping capabilities to modules */
static struct accel_module g_modules_opc[ACCEL_OPC_LAST] = {};
static char *g_modules_opc_override[ACCEL_OPC_LAST] = {};
TAILQ_HEAD(, spdk_accel_driver) g_accel_drivers = TAILQ_HEAD_INITIALIZER(g_accel_drivers);
static struct spdk_accel_driver *g_accel_driver;

static const char *g_opcode_strings[ACCEL_OPC_LAST] = {
	"copy", "fill", "dualcast", "compare", "crc32c", "copy_crc32c",
	"compress", "decompress", "encrypt", "decrypt", "xor"
};

enum accel_sequence_state {
	ACCEL_SEQUENCE_STATE_INIT,
	ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF,
	ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF,
	ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF,
	ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF,
	ACCEL_SEQUENCE_STATE_PULL_DATA,
	ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA,
	ACCEL_SEQUENCE_STATE_EXEC_TASK,
	ACCEL_SEQUENCE_STATE_AWAIT_TASK,
	ACCEL_SEQUENCE_STATE_COMPLETE_TASK,
	ACCEL_SEQUENCE_STATE_NEXT_TASK,
	ACCEL_SEQUENCE_STATE_PUSH_DATA,
	ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA,
	ACCEL_SEQUENCE_STATE_DRIVER_EXEC,
	ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK,
	ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE,
	ACCEL_SEQUENCE_STATE_ERROR,
	ACCEL_SEQUENCE_STATE_MAX,
};

static const char *g_seq_states[]
__attribute__((unused)) = {
	[ACCEL_SEQUENCE_STATE_INIT] = "init",
	[ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF] = "check-virtbuf",
	[ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF] = "await-virtbuf",
	[ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF] = "check-bouncebuf",
	[ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF] = "await-bouncebuf",
	[ACCEL_SEQUENCE_STATE_PULL_DATA] = "pull-data",
	[ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA] = "await-pull-data",
	[ACCEL_SEQUENCE_STATE_EXEC_TASK] = "exec-task",
	[ACCEL_SEQUENCE_STATE_AWAIT_TASK] = "await-task",
	[ACCEL_SEQUENCE_STATE_COMPLETE_TASK] = "complete-task",
	[ACCEL_SEQUENCE_STATE_NEXT_TASK] = "next-task",
	[ACCEL_SEQUENCE_STATE_PUSH_DATA] = "push-data",
	[ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA] = "await-push-data",
	[ACCEL_SEQUENCE_STATE_DRIVER_EXEC] = "driver-exec",
	[ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK] = "driver-await-task",
	[ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE] = "driver-complete",
	[ACCEL_SEQUENCE_STATE_ERROR] = "error",
	[ACCEL_SEQUENCE_STATE_MAX] = "",
};

#define ACCEL_SEQUENCE_STATE_STRING(s) \
	(((s) >= ACCEL_SEQUENCE_STATE_INIT && (s) < ACCEL_SEQUENCE_STATE_MAX) \
	 ? g_seq_states[s] : "unknown")

struct accel_buffer {
	struct spdk_accel_sequence	*seq;
	void				*buf;
	uint64_t			len;
	struct spdk_iobuf_entry		iobuf;
	spdk_accel_sequence_get_buf_cb	cb_fn;
	void				*cb_ctx;
	TAILQ_ENTRY(accel_buffer)	link;
};

struct accel_io_channel {
	struct spdk_io_channel			*module_ch[ACCEL_OPC_LAST];
	void					*task_pool_base;
	struct spdk_accel_sequence		*seq_pool_base;
	struct accel_buffer			*buf_pool_base;
	TAILQ_HEAD(, spdk_accel_task)		task_pool;
	TAILQ_HEAD(, spdk_accel_sequence)	seq_pool;
	TAILQ_HEAD(, accel_buffer)		buf_pool;
	struct spdk_iobuf_channel		iobuf;
};

TAILQ_HEAD(accel_sequence_tasks, spdk_accel_task);

struct spdk_accel_sequence {
	struct accel_io_channel			*ch;
	struct accel_sequence_tasks		tasks;
	struct accel_sequence_tasks		completed;
	TAILQ_HEAD(, accel_buffer)		bounce_bufs;
	enum accel_sequence_state		state;
	int					status;
	bool					in_process_sequence;
	spdk_accel_completion_cb		cb_fn;
	void					*cb_arg;
	TAILQ_ENTRY(spdk_accel_sequence)	link;
};

static inline void
accel_sequence_set_state(struct spdk_accel_sequence *seq, enum accel_sequence_state state)
{
	SPDK_DEBUGLOG(accel, "seq=%p, setting state: %s -> %s\n", seq,
		      ACCEL_SEQUENCE_STATE_STRING(seq->state), ACCEL_SEQUENCE_STATE_STRING(state));
	assert(seq->state != ACCEL_SEQUENCE_STATE_ERROR || state == ACCEL_SEQUENCE_STATE_ERROR);
	seq->state = state;
}

static void
accel_sequence_set_fail(struct spdk_accel_sequence *seq, int status)
{
	accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
	assert(status != 0);
	seq->status = status;
}

int
spdk_accel_get_opc_module_name(enum accel_opcode opcode, const char **module_name)
{
	if (opcode >= ACCEL_OPC_LAST) {
		/* invalid opcode */
		return -EINVAL;
	}

	if (g_modules_opc[opcode].module) {
		*module_name = g_modules_opc[opcode].module->name;
	} else {
		return -ENOENT;
	}

	return 0;
}

void
_accel_for_each_module(struct module_info *info, _accel_for_each_module_fn fn)
{
	struct spdk_accel_module_if *accel_module;
	enum accel_opcode opcode;
	int j = 0;

	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		for (opcode = 0; opcode < ACCEL_OPC_LAST; opcode++) {
			if (accel_module->supports_opcode(opcode)) {
				info->ops[j] = opcode;
				j++;
			}
		}
		info->name = accel_module->name;
		info->num_ops = j;
		fn(info);
		j = 0;
	}
}

int
_accel_get_opc_name(enum accel_opcode opcode, const char **opcode_name)
{
	int rc = 0;

	if (opcode < ACCEL_OPC_LAST) {
		*opcode_name = g_opcode_strings[opcode];
	} else {
		/* invalid opcode */
		rc = -EINVAL;
	}

	return rc;
}

int
spdk_accel_assign_opc(enum accel_opcode opcode, const char *name)
{
	if (g_modules_started == true) {
		/* we don't allow re-assignment once things have started */
		return -EINVAL;
	}

	if (opcode >= ACCEL_OPC_LAST) {
		/* invalid opcode */
		return -EINVAL;
	}

	/* module selection will be validated after the framework starts. */
	g_modules_opc_override[opcode] = strdup(name);

	return 0;
}

void
spdk_accel_task_complete(struct spdk_accel_task *accel_task, int status)
{
	struct accel_io_channel *accel_ch = accel_task->accel_ch;
	spdk_accel_completion_cb	cb_fn = accel_task->cb_fn;
	void				*cb_arg = accel_task->cb_arg;

	/* Return the accel_task to the pool before invoking the callback, so that the pool
	 * isn't exhausted if the user's callback (cb_fn) recursively allocates another
	 * accel_task.
	 */
	TAILQ_INSERT_HEAD(&accel_ch->task_pool, accel_task, link);

	cb_fn(cb_arg, status);
}

static inline struct spdk_accel_task *
_get_task(struct accel_io_channel *accel_ch, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct spdk_accel_task *accel_task;

	accel_task = TAILQ_FIRST(&accel_ch->task_pool);
	if (accel_task == NULL) {
		return NULL;
	}

	TAILQ_REMOVE(&accel_ch->task_pool, accel_task, link);
	accel_task->link.tqe_next = NULL;
	accel_task->link.tqe_prev = NULL;

	accel_task->cb_fn = cb_fn;
	accel_task->cb_arg = cb_arg;
	accel_task->accel_ch = accel_ch;
	accel_task->bounce.s.orig_iovs = NULL;
	accel_task->bounce.d.orig_iovs = NULL;

	return accel_task;
}

/* Accel framework public API for copy function */
int
spdk_accel_submit_copy(struct spdk_io_channel *ch, void *dst, void *src,
		       uint64_t nbytes, int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_COPY].module;
	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_COPY];

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->op_code = ACCEL_OPC_COPY;
	accel_task->flags = flags;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return module->submit_tasks(module_ch, accel_task);
}

/* Accel framework public API for dual cast copy function */
int
spdk_accel_submit_dualcast(struct spdk_io_channel *ch, void *dst1,
			   void *dst2, void *src, uint64_t nbytes, int flags,
			   spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_DUALCAST].module;
	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_DUALCAST];

	if ((uintptr_t)dst1 & (ALIGN_4K - 1) || (uintptr_t)dst2 & (ALIGN_4K - 1)) {
		SPDK_ERRLOG("Dualcast requires 4K alignment on dst addresses\n");
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d2.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST2];
	accel_task->d.iovs[0].iov_base = dst1;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->d2.iovs[0].iov_base = dst2;
	accel_task->d2.iovs[0].iov_len = nbytes;
	accel_task->d2.iovcnt = 1;
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->flags = flags;
	accel_task->op_code = ACCEL_OPC_DUALCAST;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return module->submit_tasks(module_ch, accel_task);
}

/* Accel framework public API for compare function */
int
spdk_accel_submit_compare(struct spdk_io_channel *ch, void *src1,
			  void *src2, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
			  void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_COMPARE].module;
	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_COMPARE];

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->s2.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC2];
	accel_task->s.iovs[0].iov_base = src1;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->s2.iovs[0].iov_base = src2;
	accel_task->s2.iovs[0].iov_len = nbytes;
	accel_task->s2.iovcnt = 1;
	accel_task->op_code = ACCEL_OPC_COMPARE;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return module->submit_tasks(module_ch, accel_task);
}

/* Accel framework public API for fill function */
int
spdk_accel_submit_fill(struct spdk_io_channel *ch, void *dst,
		       uint8_t fill, uint64_t nbytes, int flags,
		       spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_FILL].module;
	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_FILL];

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	memset(&accel_task->fill_pattern, fill, sizeof(uint64_t));
	accel_task->flags = flags;
	accel_task->op_code = ACCEL_OPC_FILL;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return module->submit_tasks(module_ch, accel_task);
}

/* Accel framework public API for CRC-32C function */
int
spdk_accel_submit_crc32c(struct spdk_io_channel *ch, uint32_t *crc_dst,
			 void *src, uint32_t seed, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
			 void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_CRC32C].module;
	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_CRC32C];

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->op_code = ACCEL_OPC_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return module->submit_tasks(module_ch, accel_task);
}

/* Accel framework public API for chained CRC-32C function */
int
spdk_accel_submit_crc32cv(struct spdk_io_channel *ch, uint32_t *crc_dst,
			  struct iovec *iov, uint32_t iov_cnt, uint32_t seed,
			  spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_CRC32C].module;
	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_CRC32C];

	if (iov == NULL) {
		SPDK_ERRLOG("iov should not be NULL\n");
		return -EINVAL;
	}

	if (!iov_cnt) {
		SPDK_ERRLOG("iovcnt should not be zero\n");
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		SPDK_ERRLOG("no memory\n");
		assert(0);
		return -ENOMEM;
	}

	accel_task->s.iovs = iov;
	accel_task->s.iovcnt = iov_cnt;
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->op_code = ACCEL_OPC_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return module->submit_tasks(module_ch, accel_task);
}

/* Accel framework public API for copy with CRC-32C function */
int
spdk_accel_submit_copy_crc32c(struct spdk_io_channel *ch, void *dst,
			      void *src, uint32_t *crc_dst, uint32_t seed, uint64_t nbytes,
			      int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_COPY_CRC32C].module;
	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_COPY_CRC32C];

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->flags = flags;
	accel_task->op_code = ACCEL_OPC_COPY_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return module->submit_tasks(module_ch, accel_task);
}

/* Accel framework public API for chained copy + CRC-32C function */
int
spdk_accel_submit_copy_crc32cv(struct spdk_io_channel *ch, void *dst,
			       struct iovec *src_iovs, uint32_t iov_cnt, uint32_t *crc_dst,
			       uint32_t seed, int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_COPY_CRC32C].module;
	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_COPY_CRC32C];
	uint64_t nbytes;
	uint32_t i;

	if (src_iovs == NULL) {
		SPDK_ERRLOG("iov should not be NULL\n");
		return -EINVAL;
	}

	if (!iov_cnt) {
		SPDK_ERRLOG("iovcnt should not be zero\n");
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		SPDK_ERRLOG("no memory\n");
		assert(0);
		return -ENOMEM;
	}

	nbytes = 0;
	for (i = 0; i < iov_cnt; i++) {
		nbytes += src_iovs[i].iov_len;
	}

	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = iov_cnt;
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->flags = flags;
	accel_task->op_code = ACCEL_OPC_COPY_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return module->submit_tasks(module_ch, accel_task);
}

int
spdk_accel_submit_compress(struct spdk_io_channel *ch, void *dst, uint64_t nbytes,
			   struct iovec *src_iovs, size_t src_iovcnt, uint32_t *output_size, int flags,
			   spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_COMPRESS].module;
	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_COMPRESS];

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->output_size = output_size;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->flags = flags;
	accel_task->op_code = ACCEL_OPC_COMPRESS;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return module->submit_tasks(module_ch, accel_task);
}

int
spdk_accel_submit_decompress(struct spdk_io_channel *ch, struct iovec *dst_iovs,
			     size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
			     uint32_t *output_size, int flags, spdk_accel_completion_cb cb_fn,
			     void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_DECOMPRESS].module;
	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_DECOMPRESS];

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->output_size = output_size;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->flags = flags;
	accel_task->op_code = ACCEL_OPC_DECOMPRESS;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return module->submit_tasks(module_ch, accel_task);
}

int
spdk_accel_submit_encrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  uint64_t iv, uint32_t block_size, int flags,
			  spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_ENCRYPT].module;
	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_ENCRYPT];

	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->crypto_key = key;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->iv = iv;
	accel_task->block_size = block_size;
	accel_task->flags = flags;
	accel_task->op_code = ACCEL_OPC_ENCRYPT;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return module->submit_tasks(module_ch, accel_task);
}

int
spdk_accel_submit_decrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  uint64_t iv, uint32_t block_size, int flags,
			  spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_DECRYPT].module;
	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_DECRYPT];

	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->crypto_key = key;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->iv = iv;
	accel_task->block_size = block_size;
	accel_task->flags = flags;
	accel_task->op_code = ACCEL_OPC_DECRYPT;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return module->submit_tasks(module_ch, accel_task);
}

int
spdk_accel_submit_xor(struct spdk_io_channel *ch, void *dst, void **sources, uint32_t nsrcs,
		      uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_XOR].module;
	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_XOR];

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->nsrcs.srcs = sources;
	accel_task->nsrcs.cnt = nsrcs;
	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->op_code = ACCEL_OPC_XOR;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return module->submit_tasks(module_ch, accel_task);
}

static inline struct accel_buffer *
accel_get_buf(struct accel_io_channel *ch, uint64_t len)
{
	struct accel_buffer *buf;

	buf = TAILQ_FIRST(&ch->buf_pool);
	if (spdk_unlikely(buf == NULL)) {
		return NULL;
	}

	TAILQ_REMOVE(&ch->buf_pool, buf, link);
	buf->len = len;
	buf->buf = NULL;
	buf->seq = NULL;
	buf->cb_fn = NULL;

	return buf;
}

static inline void
accel_put_buf(struct accel_io_channel *ch, struct accel_buffer *buf)
{
	if (buf->buf != NULL) {
		spdk_iobuf_put(&ch->iobuf, buf->buf, buf->len);
	}

	TAILQ_INSERT_HEAD(&ch->buf_pool, buf, link);
}

static inline struct spdk_accel_sequence *
accel_sequence_get(struct accel_io_channel *ch)
{
	struct spdk_accel_sequence *seq;

	seq = TAILQ_FIRST(&ch->seq_pool);
	if (seq == NULL) {
		return NULL;
	}

	TAILQ_REMOVE(&ch->seq_pool, seq, link);

	TAILQ_INIT(&seq->tasks);
	TAILQ_INIT(&seq->completed);
	TAILQ_INIT(&seq->bounce_bufs);

	seq->ch = ch;
	seq->status = 0;
	seq->state = ACCEL_SEQUENCE_STATE_INIT;
	seq->in_process_sequence = false;

	return seq;
}

static inline void
accel_sequence_put(struct spdk_accel_sequence *seq)
{
	struct accel_io_channel *ch = seq->ch;
	struct accel_buffer *buf;

	while (!TAILQ_EMPTY(&seq->bounce_bufs)) {
		buf = TAILQ_FIRST(&seq->bounce_bufs);
		TAILQ_REMOVE(&seq->bounce_bufs, buf, link);
		accel_put_buf(seq->ch, buf);
	}

	assert(TAILQ_EMPTY(&seq->tasks));
	assert(TAILQ_EMPTY(&seq->completed));
	seq->ch = NULL;

	TAILQ_INSERT_HEAD(&ch->seq_pool, seq, link);
}

static void accel_sequence_task_cb(void *cb_arg, int status);

static inline struct spdk_accel_task *
accel_sequence_get_task(struct accel_io_channel *ch, struct spdk_accel_sequence *seq,
			spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct spdk_accel_task *task;

	task = _get_task(ch, accel_sequence_task_cb, seq);
	if (task == NULL) {
		return task;
	}

	task->step_cb_fn = cb_fn;
	task->step_cb_arg = cb_arg;

	return task;
}

int
spdk_accel_append_copy(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
		       struct iovec *dst_iovs, uint32_t dst_iovcnt,
		       struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
		       struct iovec *src_iovs, uint32_t src_iovcnt,
		       struct spdk_memory_domain *src_domain, void *src_domain_ctx,
		       int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->flags = flags;
	task->op_code = ACCEL_OPC_COPY;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}
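
/*
 * Usage sketch (illustrative only, compiled out): chaining a fill and a copy
 * into a single sequence with the append APIs.  spdk_accel_sequence_finish()
 * and spdk_accel_sequence_abort() are declared in spdk/accel.h; both iovecs
 * must remain valid until cb_fn runs.
 */
#if 0
static int
example_fill_then_copy(struct spdk_io_channel *ch, struct iovec *dst_iov,
		       struct iovec *tmp_iov, spdk_accel_completion_cb cb_fn,
		       void *cb_arg)
{
	struct spdk_accel_sequence *seq = NULL;
	int rc;

	rc = spdk_accel_append_fill(&seq, ch, tmp_iov->iov_base,
				    tmp_iov->iov_len, NULL, NULL, 0xa5, 0,
				    NULL, NULL);
	if (rc != 0) {
		return rc;
	}

	rc = spdk_accel_append_copy(&seq, ch, dst_iov, 1, NULL, NULL,
				    tmp_iov, 1, NULL, NULL, 0, NULL, NULL);
	if (rc != 0) {
		spdk_accel_sequence_abort(seq);
		return rc;
	}

	/* Executes both steps and invokes cb_fn once the whole chain is done;
	 * the framework may merge the fill and the copy into one operation
	 * (see accel_sequence_merge_tasks() below). */
	return spdk_accel_sequence_finish(seq, cb_fn, cb_arg);
}
#endif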

int
spdk_accel_append_fill(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
		       void *buf, uint64_t len,
		       struct spdk_memory_domain *domain, void *domain_ctx, uint8_t pattern,
		       int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	memset(&task->fill_pattern, pattern, sizeof(uint64_t));

	task->d.iovs = &task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	task->d.iovs[0].iov_base = buf;
	task->d.iovs[0].iov_len = len;
	task->d.iovcnt = 1;
	task->src_domain = NULL;
	task->dst_domain = domain;
	task->dst_domain_ctx = domain_ctx;
	task->flags = flags;
	task->op_code = ACCEL_OPC_FILL;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_append_decompress(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
			     struct iovec *dst_iovs, size_t dst_iovcnt,
			     struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			     struct iovec *src_iovs, size_t src_iovcnt,
			     struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			     int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	/* TODO: support output_size for chaining */
	task->output_size = NULL;
	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->flags = flags;
	task->op_code = ACCEL_OPC_DECOMPRESS;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_append_encrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
			  struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			  uint64_t iv, uint32_t block_size, int flags,
			  spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key ||
			  !block_size)) {
		return -EINVAL;
	}

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	task->crypto_key = key;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->iv = iv;
	task->block_size = block_size;
	task->flags = flags;
	task->op_code = ACCEL_OPC_ENCRYPT;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_append_decrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
			  struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			  uint64_t iv, uint32_t block_size, int flags,
			  spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key ||
			  !block_size)) {
		return -EINVAL;
	}

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	task->crypto_key = key;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->iv = iv;
	task->block_size = block_size;
	task->flags = flags;
	task->op_code = ACCEL_OPC_DECRYPT;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_get_buf(struct spdk_io_channel *ch, uint64_t len, void **buf,
		   struct spdk_memory_domain **domain, void **domain_ctx)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct accel_buffer *accel_buf;

	accel_buf = accel_get_buf(accel_ch, len);
	if (spdk_unlikely(accel_buf == NULL)) {
		return -ENOMEM;
	}

	/* We always return the same pointer and identify the buffers through domain_ctx */
	*buf = ACCEL_BUFFER_BASE;
	*domain_ctx = accel_buf;
	*domain = g_accel_domain;

	return 0;
}

void
spdk_accel_put_buf(struct spdk_io_channel *ch, void *buf,
		   struct spdk_memory_domain *domain, void *domain_ctx)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct accel_buffer *accel_buf = domain_ctx;

	assert(domain == g_accel_domain);
	assert(buf == ACCEL_BUFFER_BASE);

	accel_put_buf(accel_ch, accel_buf);
}
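
/*
 * Usage sketch (illustrative only, compiled out): every buffer obtained from
 * spdk_accel_get_buf() is the same virtual pointer (ACCEL_BUFFER_BASE); the
 * (domain, domain_ctx) pair is what actually identifies the buffer, so both
 * must be passed along to the append APIs and back to spdk_accel_put_buf().
 */
#if 0
static int
example_accel_buf_roundtrip(struct spdk_io_channel *ch, uint64_t len)
{
	struct spdk_memory_domain *domain;
	void *buf, *domain_ctx;
	int rc;

	rc = spdk_accel_get_buf(ch, len, &buf, &domain, &domain_ctx);
	if (rc != 0) {
		return rc;
	}

	/* ... hand (buf, domain, domain_ctx) to spdk_accel_append_*() ... */

	spdk_accel_put_buf(ch, buf, domain, domain_ctx);

	return 0;
}
#endif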
1112 
1113 static void
1114 accel_sequence_complete_tasks(struct spdk_accel_sequence *seq)
1115 {
1116 	struct spdk_accel_task *task;
1117 	struct accel_io_channel *ch = seq->ch;
1118 	spdk_accel_step_cb cb_fn;
1119 	void *cb_arg;
1120 
1121 	while (!TAILQ_EMPTY(&seq->completed)) {
1122 		task = TAILQ_FIRST(&seq->completed);
1123 		TAILQ_REMOVE(&seq->completed, task, seq_link);
1124 		cb_fn = task->step_cb_fn;
1125 		cb_arg = task->step_cb_arg;
1126 		TAILQ_INSERT_HEAD(&ch->task_pool, task, link);
1127 		if (cb_fn != NULL) {
1128 			cb_fn(cb_arg);
1129 		}
1130 	}
1131 
1132 	while (!TAILQ_EMPTY(&seq->tasks)) {
1133 		task = TAILQ_FIRST(&seq->tasks);
1134 		TAILQ_REMOVE(&seq->tasks, task, seq_link);
1135 		cb_fn = task->step_cb_fn;
1136 		cb_arg = task->step_cb_arg;
1137 		TAILQ_INSERT_HEAD(&ch->task_pool, task, link);
1138 		if (cb_fn != NULL) {
1139 			cb_fn(cb_arg);
1140 		}
1141 	}
1142 }
1143 
1144 static void
1145 accel_sequence_complete(struct spdk_accel_sequence *seq)
1146 {
1147 	SPDK_DEBUGLOG(accel, "Completed sequence: %p with status: %d\n", seq, seq->status);
1148 
1149 	/* First notify all users that appended operations to this sequence */
1150 	accel_sequence_complete_tasks(seq);
1151 
1152 	/* Then notify the user that finished the sequence */
1153 	seq->cb_fn(seq->cb_arg, seq->status);
1154 
1155 	accel_sequence_put(seq);
1156 }
1157 
1158 static void
1159 accel_update_buf(void **buf, struct accel_buffer *accel_buf)
1160 {
1161 	uintptr_t offset;
1162 
1163 	offset = (uintptr_t)(*buf) & ACCEL_BUFFER_OFFSET_MASK;
1164 	assert(offset < accel_buf->len);
1165 
1166 	*buf = (char *)accel_buf->buf + offset;
1167 }
1168 
1169 static void
1170 accel_update_iovs(struct iovec *iovs, uint32_t iovcnt, struct accel_buffer *buf)
1171 {
1172 	uint32_t i;
1173 
1174 	for (i = 0; i < iovcnt; ++i) {
1175 		accel_update_buf(&iovs[i].iov_base, buf);
1176 	}
1177 }
1178 
1179 static void
1180 accel_sequence_set_virtbuf(struct spdk_accel_sequence *seq, struct accel_buffer *buf)
1181 {
1182 	struct spdk_accel_task *task;
1183 
1184 	/* Now that we've allocated the actual data buffer for this accel_buffer, update all tasks
1185 	 * in a sequence that were using it.
1186 	 */
1187 	TAILQ_FOREACH(task, &seq->tasks, seq_link) {
1188 		if (task->src_domain == g_accel_domain && task->src_domain_ctx == buf) {
1189 			accel_update_iovs(task->s.iovs, task->s.iovcnt, buf);
1190 			task->src_domain = NULL;
1191 		}
1192 		if (task->dst_domain == g_accel_domain && task->dst_domain_ctx == buf) {
1193 			accel_update_iovs(task->d.iovs, task->d.iovcnt, buf);
1194 			task->dst_domain = NULL;
1195 		}
1196 	}
1197 }
1198 
1199 static void accel_process_sequence(struct spdk_accel_sequence *seq);
1200 
1201 static void
1202 accel_iobuf_get_virtbuf_cb(struct spdk_iobuf_entry *entry, void *buf)
1203 {
1204 	struct accel_buffer *accel_buf;
1205 
1206 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1207 
1208 	assert(accel_buf->seq != NULL);
1209 	assert(accel_buf->buf == NULL);
1210 	accel_buf->buf = buf;
1211 
1212 	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
1213 	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
1214 	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
1215 	accel_process_sequence(accel_buf->seq);
1216 }
1217 
1218 static bool
1219 accel_sequence_alloc_buf(struct spdk_accel_sequence *seq, struct accel_buffer *buf,
1220 			 spdk_iobuf_get_cb cb_fn)
1221 {
1222 	struct accel_io_channel *ch = seq->ch;
1223 
1224 	assert(buf->buf == NULL);
1225 	assert(buf->seq == NULL);
1226 
1227 	buf->seq = seq;
1228 	buf->buf = spdk_iobuf_get(&ch->iobuf, buf->len, &buf->iobuf, cb_fn);
1229 	if (buf->buf == NULL) {
1230 		return false;
1231 	}
1232 
1233 	return true;
1234 }
1235 
1236 static bool
1237 accel_sequence_check_virtbuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1238 {
1239 	/* If a task doesn't have dst/src (e.g. fill, crc32), its dst/src domain should be set to
1240 	 * NULL */
1241 	if (task->src_domain == g_accel_domain) {
1242 		if (!accel_sequence_alloc_buf(seq, task->src_domain_ctx,
1243 					      accel_iobuf_get_virtbuf_cb)) {
1244 			return false;
1245 		}
1246 
1247 		accel_sequence_set_virtbuf(seq, task->src_domain_ctx);
1248 	}
1249 
1250 	if (task->dst_domain == g_accel_domain) {
1251 		if (!accel_sequence_alloc_buf(seq, task->dst_domain_ctx,
1252 					      accel_iobuf_get_virtbuf_cb)) {
1253 			return false;
1254 		}
1255 
1256 		accel_sequence_set_virtbuf(seq, task->dst_domain_ctx);
1257 	}
1258 
1259 	return true;
1260 }
1261 
1262 static void
1263 accel_sequence_get_buf_cb(struct spdk_iobuf_entry *entry, void *buf)
1264 {
1265 	struct accel_buffer *accel_buf;
1266 
1267 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1268 
1269 	assert(accel_buf->seq != NULL);
1270 	assert(accel_buf->buf == NULL);
1271 	accel_buf->buf = buf;
1272 
1273 	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
1274 	accel_buf->cb_fn(accel_buf->seq, accel_buf->cb_ctx);
1275 }
1276 
1277 bool
1278 spdk_accel_alloc_sequence_buf(struct spdk_accel_sequence *seq, void *buf,
1279 			      struct spdk_memory_domain *domain, void *domain_ctx,
1280 			      spdk_accel_sequence_get_buf_cb cb_fn, void *cb_ctx)
1281 {
1282 	struct accel_buffer *accel_buf = domain_ctx;
1283 
1284 	assert(domain == g_accel_domain);
1285 	accel_buf->cb_fn = cb_fn;
1286 	accel_buf->cb_ctx = cb_ctx;
1287 
1288 	if (!accel_sequence_alloc_buf(seq, accel_buf, accel_sequence_get_buf_cb)) {
1289 		return false;
1290 	}
1291 
1292 	accel_sequence_set_virtbuf(seq, accel_buf);
1293 
1294 	return true;
1295 }
1296 
1297 struct spdk_accel_task *
1298 spdk_accel_sequence_first_task(struct spdk_accel_sequence *seq)
1299 {
1300 	return TAILQ_FIRST(&seq->tasks);
1301 }
1302 
1303 struct spdk_accel_task *
1304 spdk_accel_sequence_next_task(struct spdk_accel_task *task)
1305 {
1306 	return TAILQ_NEXT(task, seq_link);
1307 }
1308 
1309 static inline uint64_t
1310 accel_get_iovlen(struct iovec *iovs, uint32_t iovcnt)
1311 {
1312 	uint64_t result = 0;
1313 	uint32_t i;
1314 
1315 	for (i = 0; i < iovcnt; ++i) {
1316 		result += iovs[i].iov_len;
1317 	}
1318 
1319 	return result;
1320 }
1321 
1322 static inline void
1323 accel_set_bounce_buffer(struct spdk_accel_bounce_buffer *bounce, struct iovec **iovs,
1324 			uint32_t *iovcnt, struct spdk_memory_domain **domain, void **domain_ctx,
1325 			struct accel_buffer *buf)
1326 {
1327 	bounce->orig_iovs = *iovs;
1328 	bounce->orig_iovcnt = *iovcnt;
1329 	bounce->orig_domain = *domain;
1330 	bounce->orig_domain_ctx = *domain_ctx;
1331 	bounce->iov.iov_base = buf->buf;
1332 	bounce->iov.iov_len = buf->len;
1333 
1334 	*iovs = &bounce->iov;
1335 	*iovcnt = 1;
1336 	*domain = NULL;
1337 }
1338 
1339 static void
1340 accel_iobuf_get_src_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
1341 {
1342 	struct spdk_accel_task *task;
1343 	struct accel_buffer *accel_buf;
1344 
1345 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1346 	assert(accel_buf->buf == NULL);
1347 	accel_buf->buf = buf;
1348 
1349 	task = TAILQ_FIRST(&accel_buf->seq->tasks);
1350 	assert(task != NULL);
1351 
1352 	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
1353 	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
1354 	accel_set_bounce_buffer(&task->bounce.s, &task->s.iovs, &task->s.iovcnt, &task->src_domain,
1355 				&task->src_domain_ctx, accel_buf);
1356 	accel_process_sequence(accel_buf->seq);
1357 }
1358 
1359 static void
1360 accel_iobuf_get_dst_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
1361 {
1362 	struct spdk_accel_task *task;
1363 	struct accel_buffer *accel_buf;
1364 
1365 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1366 	assert(accel_buf->buf == NULL);
1367 	accel_buf->buf = buf;
1368 
1369 	task = TAILQ_FIRST(&accel_buf->seq->tasks);
1370 	assert(task != NULL);
1371 
1372 	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
1373 	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
1374 	accel_set_bounce_buffer(&task->bounce.d, &task->d.iovs, &task->d.iovcnt, &task->dst_domain,
1375 				&task->dst_domain_ctx, accel_buf);
1376 	accel_process_sequence(accel_buf->seq);
1377 }
1378 
1379 static int
1380 accel_sequence_check_bouncebuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1381 {
1382 	struct accel_buffer *buf;
1383 
1384 	if (task->src_domain != NULL) {
1385 		/* By the time we're here, accel buffers should have been allocated */
1386 		assert(task->src_domain != g_accel_domain);
1387 
1388 		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->s.iovs, task->s.iovcnt));
1389 		if (buf == NULL) {
1390 			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
1391 			return -ENOMEM;
1392 		}
1393 
1394 		TAILQ_INSERT_TAIL(&seq->bounce_bufs, buf, link);
1395 		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_src_bounce_cb)) {
1396 			return -EAGAIN;
1397 		}
1398 
1399 		accel_set_bounce_buffer(&task->bounce.s, &task->s.iovs, &task->s.iovcnt,
1400 					&task->src_domain, &task->src_domain_ctx, buf);
1401 	}
1402 
1403 	if (task->dst_domain != NULL) {
1404 		/* By the time we're here, accel buffers should have been allocated */
1405 		assert(task->dst_domain != g_accel_domain);
1406 
1407 		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->d.iovs, task->d.iovcnt));
1408 		if (buf == NULL) {
1409 			/* The src buffer will be released when a sequence is completed */
1410 			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
1411 			return -ENOMEM;
1412 		}
1413 
1414 		TAILQ_INSERT_TAIL(&seq->bounce_bufs, buf, link);
1415 		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_dst_bounce_cb)) {
1416 			return -EAGAIN;
1417 		}
1418 
1419 		accel_set_bounce_buffer(&task->bounce.d, &task->d.iovs, &task->d.iovcnt,
1420 					&task->dst_domain, &task->dst_domain_ctx, buf);
1421 	}
1422 
1423 	return 0;
1424 }
1425 
1426 static void
1427 accel_task_pull_data_cb(void *ctx, int status)
1428 {
1429 	struct spdk_accel_sequence *seq = ctx;
1430 
1431 	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
1432 	if (spdk_likely(status == 0)) {
1433 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
1434 	} else {
1435 		accel_sequence_set_fail(seq, status);
1436 	}
1437 
1438 	accel_process_sequence(seq);
1439 }
1440 
1441 static void
1442 accel_task_pull_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1443 {
1444 	int rc;
1445 
1446 	assert(task->bounce.s.orig_iovs != NULL);
1447 	assert(task->bounce.s.orig_domain != NULL);
1448 	assert(task->bounce.s.orig_domain != g_accel_domain);
1449 	assert(!g_modules_opc[task->op_code].supports_memory_domains);
1450 
1451 	rc = spdk_memory_domain_pull_data(task->bounce.s.orig_domain,
1452 					  task->bounce.s.orig_domain_ctx,
1453 					  task->bounce.s.orig_iovs, task->bounce.s.orig_iovcnt,
1454 					  task->s.iovs, task->s.iovcnt,
1455 					  accel_task_pull_data_cb, seq);
1456 	if (spdk_unlikely(rc != 0)) {
1457 		SPDK_ERRLOG("Failed to pull data from memory domain: %s, rc: %d\n",
1458 			    spdk_memory_domain_get_dma_device_id(task->bounce.s.orig_domain), rc);
1459 		accel_sequence_set_fail(seq, rc);
1460 	}
1461 }
1462 
1463 static void
1464 accel_task_push_data_cb(void *ctx, int status)
1465 {
1466 	struct spdk_accel_sequence *seq = ctx;
1467 
1468 	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
1469 	if (spdk_likely(status == 0)) {
1470 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
1471 	} else {
1472 		accel_sequence_set_fail(seq, status);
1473 	}
1474 
1475 	accel_process_sequence(seq);
1476 }
1477 
1478 static void
1479 accel_task_push_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1480 {
1481 	int rc;
1482 
1483 	assert(task->bounce.d.orig_iovs != NULL);
1484 	assert(task->bounce.d.orig_domain != NULL);
1485 	assert(task->bounce.d.orig_domain != g_accel_domain);
1486 	assert(!g_modules_opc[task->op_code].supports_memory_domains);
1487 
1488 	rc = spdk_memory_domain_push_data(task->bounce.d.orig_domain,
1489 					  task->bounce.d.orig_domain_ctx,
1490 					  task->bounce.d.orig_iovs, task->bounce.d.orig_iovcnt,
1491 					  task->d.iovs, task->d.iovcnt,
1492 					  accel_task_push_data_cb, seq);
1493 	if (spdk_unlikely(rc != 0)) {
1494 		SPDK_ERRLOG("Failed to push data to memory domain: %s, rc: %d\n",
1495 			    spdk_memory_domain_get_dma_device_id(task->bounce.s.orig_domain), rc);
1496 		accel_sequence_set_fail(seq, rc);
1497 	}
1498 }
1499 
1500 static void
1501 accel_process_sequence(struct spdk_accel_sequence *seq)
1502 {
1503 	struct accel_io_channel *accel_ch = seq->ch;
1504 	struct spdk_accel_module_if *module;
1505 	struct spdk_io_channel *module_ch;
1506 	struct spdk_accel_task *task;
1507 	enum accel_sequence_state state;
1508 	int rc;
1509 
1510 	/* Prevent recursive calls to this function */
1511 	if (spdk_unlikely(seq->in_process_sequence)) {
1512 		return;
1513 	}
1514 	seq->in_process_sequence = true;
1515 
1516 	task = TAILQ_FIRST(&seq->tasks);
1517 	assert(task != NULL);
1518 
1519 	do {
1520 		state = seq->state;
1521 		switch (state) {
1522 		case ACCEL_SEQUENCE_STATE_INIT:
1523 			if (g_accel_driver != NULL) {
1524 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_EXEC);
1525 				break;
1526 			}
1527 		/* Fall through */
1528 		case ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF:
1529 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
1530 			if (!accel_sequence_check_virtbuf(seq, task)) {
1531 				/* We couldn't allocate a buffer, wait until one is available */
1532 				break;
1533 			}
1534 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
1535 		/* Fall through */
1536 		case ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF:
1537 			/* If a module supports memory domains, we don't need to allocate bounce
1538 			 * buffers */
1539 			if (g_modules_opc[task->op_code].supports_memory_domains) {
1540 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
1541 				break;
1542 			}
1543 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
1544 			rc = accel_sequence_check_bouncebuf(seq, task);
1545 			if (rc != 0) {
1546 				/* We couldn't allocate a buffer, wait until one is available */
1547 				if (rc == -EAGAIN) {
1548 					break;
1549 				}
1550 				accel_sequence_set_fail(seq, rc);
1551 				break;
1552 			}
1553 			if (task->bounce.s.orig_iovs != NULL) {
1554 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PULL_DATA);
1555 				break;
1556 			}
1557 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
1558 		/* Fall through */
1559 		case ACCEL_SEQUENCE_STATE_EXEC_TASK:
1560 			SPDK_DEBUGLOG(accel, "Executing %s operation, sequence: %p\n",
1561 				      g_opcode_strings[task->op_code], seq);
1562 
1563 			module = g_modules_opc[task->op_code].module;
1564 			module_ch = accel_ch->module_ch[task->op_code];
1565 
1566 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_TASK);
1567 			rc = module->submit_tasks(module_ch, task);
1568 			if (spdk_unlikely(rc != 0)) {
1569 				SPDK_ERRLOG("Failed to submit %s operation, sequence: %p\n",
1570 					    g_opcode_strings[task->op_code], seq);
1571 				accel_sequence_set_fail(seq, rc);
1572 			}
1573 			break;
1574 		case ACCEL_SEQUENCE_STATE_PULL_DATA:
1575 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
1576 			accel_task_pull_data(seq, task);
1577 			break;
1578 		case ACCEL_SEQUENCE_STATE_COMPLETE_TASK:
1579 			if (task->bounce.d.orig_iovs != NULL) {
1580 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PUSH_DATA);
1581 				break;
1582 			}
1583 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
1584 			break;
1585 		case ACCEL_SEQUENCE_STATE_PUSH_DATA:
1586 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
1587 			accel_task_push_data(seq, task);
1588 			break;
1589 		case ACCEL_SEQUENCE_STATE_NEXT_TASK:
1590 			TAILQ_REMOVE(&seq->tasks, task, seq_link);
1591 			TAILQ_INSERT_TAIL(&seq->completed, task, seq_link);
1592 			/* Check if there are any remaining tasks */
1593 			task = TAILQ_FIRST(&seq->tasks);
1594 			if (task == NULL) {
1595 				/* Immediately return here to make sure we don't touch the sequence
1596 				 * after it's completed */
1597 				accel_sequence_complete(seq);
1598 				return;
1599 			}
1600 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_INIT);
1601 			break;
1602 		case ACCEL_SEQUENCE_STATE_DRIVER_EXEC:
1603 			assert(!TAILQ_EMPTY(&seq->tasks));
1604 
1605 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK);
1606 			rc = g_accel_driver->execute_sequence(seq);
1607 			if (spdk_unlikely(rc != 0)) {
1608 				SPDK_ERRLOG("Failed to execute sequence: %p using driver: %s\n",
1609 					    seq, g_accel_driver->name);
1610 				accel_sequence_set_fail(seq, rc);
1611 			}
1612 			break;
1613 		case ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE:
1614 			task = TAILQ_FIRST(&seq->tasks);
1615 			if (task == NULL) {
1616 				/* Immediately return here to make sure we don't touch the sequence
1617 				 * after it's completed */
1618 				accel_sequence_complete(seq);
1619 				return;
1620 			}
1621 			/* We don't want to execute the next task through the driver, so we
1622 			 * explicitly omit the INIT state here */
1623 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
1624 			break;
1625 		case ACCEL_SEQUENCE_STATE_ERROR:
1626 			/* Immediately return here to make sure we don't touch the sequence
1627 			 * after it's completed */
1628 			assert(seq->status != 0);
1629 			accel_sequence_complete(seq);
1630 			return;
1631 		case ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF:
1632 		case ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF:
1633 		case ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA:
1634 		case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
1635 		case ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA:
1636 		case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK:
1637 			break;
1638 		default:
1639 			assert(0 && "bad state");
1640 			break;
1641 		}
1642 	} while (seq->state != state);
1643 
1644 	seq->in_process_sequence = false;
1645 }
1646 
static void
accel_sequence_task_cb(void *cb_arg, int status)
{
	struct spdk_accel_sequence *seq = cb_arg;
	struct spdk_accel_task *task = TAILQ_FIRST(&seq->tasks);
	struct accel_io_channel *accel_ch = seq->ch;

	/* spdk_accel_task_complete() puts the task back into the task pool, which we don't want
	 * for tasks that are part of a sequence.  Removing the task from the pool again here is
	 * the easiest way to undo that, even though it is a bit hacky.
	 */
	assert(task != NULL);
	TAILQ_REMOVE(&accel_ch->task_pool, task, link);

	switch (seq->state) {
	case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_COMPLETE_TASK);
		if (spdk_unlikely(status != 0)) {
			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p\n",
				    g_opcode_strings[task->op_code], seq);
			accel_sequence_set_fail(seq, status);
		}

		accel_process_sequence(seq);
		break;
	case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK:
		assert(g_accel_driver != NULL);
		/* Immediately remove the task from the outstanding list to make sure the next call
		 * to spdk_accel_sequence_first_task() doesn't return it */
		TAILQ_REMOVE(&seq->tasks, task, seq_link);
		TAILQ_INSERT_TAIL(&seq->completed, task, seq_link);

		if (spdk_unlikely(status != 0)) {
			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p through "
				    "driver: %s\n", g_opcode_strings[task->op_code], seq,
				    g_accel_driver->name);
			/* Update status without using accel_sequence_set_fail() to avoid changing
			 * seq's state to ERROR until the driver calls spdk_accel_sequence_continue() */
			seq->status = status;
		}
		break;
	default:
		assert(0 && "bad state");
		break;
	}
}

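/* Called by the driver to resume processing of a sequence after it has finished executing
 * its part of it.  Any error recorded while the driver was running moves the sequence into
 * the ERROR state; otherwise the remaining tasks are executed by the regular modules. */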
void
spdk_accel_sequence_continue(struct spdk_accel_sequence *seq)
{
	assert(g_accel_driver != NULL);
	assert(seq->state == ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK);

	if (spdk_likely(seq->status == 0)) {
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE);
	} else {
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
	}

	accel_process_sequence(seq);
}

static bool
accel_compare_iovs(struct iovec *iova, uint32_t iovacnt, struct iovec *iovb, uint32_t iovbcnt)
{
	/* For now, just do a simple check that the iovec arrays are exactly the same (same
	 * number of elements with identical bases and lengths).  Buffers describing the same
	 * memory but split differently will not compare as equal.
	 */
	if (iovacnt != iovbcnt) {
		return false;
	}

	return memcmp(iova, iovb, sizeof(*iova) * iovacnt) == 0;
}

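/*
 * Try to merge a task with the one following it (next_task).  A copy whose destination
 * exactly matches the next task's source (same memory domain and identical iovecs) can be
 * elided by pointing the next task directly at the copy's source.  Conversely, a copy that
 * consumes the destination of the preceding producer (fill/encrypt/decrypt/decompress) can
 * be folded into that producer by retargeting the producer's destination.
 */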
static void
accel_sequence_merge_tasks(struct spdk_accel_sequence *seq, struct spdk_accel_task *task,
			   struct spdk_accel_task **next_task)
{
	struct spdk_accel_task *next = *next_task;

	switch (task->op_code) {
	case ACCEL_OPC_COPY:
		/* We only allow changing the src of operations that actually have a src, e.g. we
		 * never do it for fill.  Theoretically it is possible, but we'd have to be careful
		 * to change the src of the operation after the fill (which in turn could also be a
		 * fill).  So, for the sake of simplicity, skip this type of operation for now.
		 */
		if (next->op_code != ACCEL_OPC_DECOMPRESS &&
		    next->op_code != ACCEL_OPC_COPY &&
		    next->op_code != ACCEL_OPC_ENCRYPT &&
		    next->op_code != ACCEL_OPC_DECRYPT) {
			break;
		}
		if (task->dst_domain != next->src_domain) {
			break;
		}
		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
					next->s.iovs, next->s.iovcnt)) {
			break;
		}
		next->s.iovs = task->s.iovs;
		next->s.iovcnt = task->s.iovcnt;
		next->src_domain = task->src_domain;
		TAILQ_REMOVE(&seq->tasks, task, seq_link);
		TAILQ_INSERT_TAIL(&seq->completed, task, seq_link);
		break;
	case ACCEL_OPC_DECOMPRESS:
	case ACCEL_OPC_FILL:
	case ACCEL_OPC_ENCRYPT:
	case ACCEL_OPC_DECRYPT:
		/* We can only merge tasks when one of them is a copy */
		if (next->op_code != ACCEL_OPC_COPY) {
			break;
		}
		if (task->dst_domain != next->src_domain) {
			break;
		}
		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
					next->s.iovs, next->s.iovcnt)) {
			break;
		}
		task->d.iovs = next->d.iovs;
		task->d.iovcnt = next->d.iovcnt;
		task->dst_domain = next->dst_domain;
		/* We're removing next_task from the tasks queue, so we need to update its pointer,
		 * so that the TAILQ_FOREACH_SAFE() loop in the caller works correctly */
		*next_task = TAILQ_NEXT(next, seq_link);
		TAILQ_REMOVE(&seq->tasks, next, seq_link);
		TAILQ_INSERT_TAIL(&seq->completed, next, seq_link);
		break;
	default:
		assert(0 && "bad opcode");
		break;
	}
}

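/* Finish building a sequence: first try to elide unnecessary copies by merging adjacent
 * tasks, then start processing it.  cb_fn is invoked once all tasks have completed or one
 * of them has failed. */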
int
spdk_accel_sequence_finish(struct spdk_accel_sequence *seq,
			   spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct spdk_accel_task *task, *next;

	/* Try to remove any copy operations if possible */
	TAILQ_FOREACH_SAFE(task, &seq->tasks, seq_link, next) {
		if (next == NULL) {
			break;
		}
		accel_sequence_merge_tasks(seq, task, &next);
	}

	seq->cb_fn = cb_fn;
	seq->cb_arg = cb_arg;

	accel_process_sequence(seq);

	return 0;
}

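/* Reverse the order of the tasks in a sequence by draining them onto a temporary list and
 * re-inserting each one at the head. */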
void
spdk_accel_sequence_reverse(struct spdk_accel_sequence *seq)
{
	struct accel_sequence_tasks tasks = TAILQ_HEAD_INITIALIZER(tasks);
	struct spdk_accel_task *task;

	assert(TAILQ_EMPTY(&seq->completed));
	TAILQ_SWAP(&tasks, &seq->tasks, spdk_accel_task, seq_link);

	while (!TAILQ_EMPTY(&tasks)) {
		task = TAILQ_FIRST(&tasks);
		TAILQ_REMOVE(&tasks, task, seq_link);
		TAILQ_INSERT_HEAD(&seq->tasks, task, seq_link);
	}
}

void
spdk_accel_sequence_abort(struct spdk_accel_sequence *seq)
{
	if (seq == NULL) {
		return;
	}

	accel_sequence_complete_tasks(seq);
	accel_sequence_put(seq);
}

struct spdk_memory_domain *
spdk_accel_get_memory_domain(void)
{
	return g_accel_domain;
}

static struct spdk_accel_module_if *
_module_find_by_name(const char *name)
{
	struct spdk_accel_module_if *accel_module = NULL;

	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		if (strcmp(name, accel_module->name) == 0) {
			break;
		}
	}

	return accel_module;
}

static inline struct spdk_accel_crypto_key *
_accel_crypto_key_get(const char *name)
{
	struct spdk_accel_crypto_key *key;

	assert(spdk_spin_held(&g_keyring_spin));

	TAILQ_FOREACH(key, &g_keyring, link) {
		if (strcmp(name, key->param.key_name) == 0) {
			return key;
		}
	}

	return NULL;
}

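/* Zeroize all key material with spdk_memset_s() before freeing it, so that secrets don't
 * linger in freed memory. */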
static void
accel_crypto_key_free_mem(struct spdk_accel_crypto_key *key)
{
	if (key->param.hex_key) {
		spdk_memset_s(key->param.hex_key, key->key_size * 2, 0, key->key_size * 2);
		free(key->param.hex_key);
	}
	if (key->param.hex_key2) {
		spdk_memset_s(key->param.hex_key2, key->key2_size * 2, 0, key->key2_size * 2);
		free(key->param.hex_key2);
	}
	free(key->param.key_name);
	free(key->param.cipher);
	if (key->key) {
		spdk_memset_s(key->key, key->key_size, 0, key->key_size);
		free(key->key);
	}
	if (key->key2) {
		spdk_memset_s(key->key2, key->key2_size, 0, key->key2_size);
		free(key->key2);
	}
	free(key);
}

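/* "Unsafe" because no keyring locking is done here; the caller is responsible for making
 * sure the key is no longer reachable through the keyring. */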
static void
accel_crypto_key_destroy_unsafe(struct spdk_accel_crypto_key *key)
{
	assert(key->module_if);
	assert(key->module_if->crypto_key_deinit);

	key->module_if->crypto_key_deinit(key);
	accel_crypto_key_free_mem(key);
}

/*
 * This function mitigates a timing side channel that could be caused by using strcmp().
 * See the section "Mitigating Information Leakage Based on Variable Timing" in the
 * article [1] for more details.
 * [1] https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/secure-coding/mitigate-timing-side-channel-crypto-implementation.html
 */
static bool
accel_aes_xts_keys_equal(const char *k1, size_t k1_len, const char *k2, size_t k2_len)
{
	size_t i;
	volatile size_t x = k1_len ^ k2_len;

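	/* Accumulate differences in a volatile variable instead of returning early, and use
	 * bitwise '&' rather than '&&' to avoid short-circuit branches, so the comparison time
	 * doesn't depend on where (or whether) the keys differ.
	 */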
	for (i = 0; ((i < k1_len) & (i < k2_len)); i++) {
		x |= k1[i] ^ k2[i];
	}

	return x == 0;
}

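/*
 * Create a crypto key from user-supplied parameters: validate them, duplicate the strings,
 * unhexlify the key(s), reject identical AES-XTS key halves, and register the key with the
 * module assigned to the encrypt opcode before adding it to the keyring.
 */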
int
spdk_accel_crypto_key_create(const struct spdk_accel_crypto_key_create_param *param)
{
	struct spdk_accel_module_if *module;
	struct spdk_accel_crypto_key *key;
	size_t hex_key_size, hex_key2_size;
	int rc;

	if (!param || !param->hex_key || !param->cipher || !param->key_name) {
		return -EINVAL;
	}

	if (g_modules_opc[ACCEL_OPC_ENCRYPT].module != g_modules_opc[ACCEL_OPC_DECRYPT].module) {
		/* should not normally be possible, but check and warn the user */
		SPDK_ERRLOG("Different accel modules are used for encryption and decryption\n");
	}
	module = g_modules_opc[ACCEL_OPC_ENCRYPT].module;

	if (!module) {
		SPDK_ERRLOG("No accel module assigned for crypto operations\n");
		return -ENOENT;
	}
	if (!module->crypto_key_init) {
		SPDK_ERRLOG("Accel module \"%s\" doesn't support crypto operations\n", module->name);
		return -ENOTSUP;
	}

	key = calloc(1, sizeof(*key));
	if (!key) {
		return -ENOMEM;
	}

	key->param.key_name = strdup(param->key_name);
	if (!key->param.key_name) {
		rc = -ENOMEM;
		goto error;
	}

	key->param.cipher = strdup(param->cipher);
	if (!key->param.cipher) {
		rc = -ENOMEM;
		goto error;
	}

	hex_key_size = strnlen(param->hex_key, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
	if (hex_key_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
		SPDK_ERRLOG("key1 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
		rc = -EINVAL;
		goto error;
	}
	key->param.hex_key = strdup(param->hex_key);
	if (!key->param.hex_key) {
		rc = -ENOMEM;
		goto error;
	}

	key->key_size = hex_key_size / 2;
	key->key = spdk_unhexlify(key->param.hex_key);
	if (!key->key) {
		SPDK_ERRLOG("Failed to unhexlify key1\n");
		rc = -EINVAL;
		goto error;
	}

	if (param->hex_key2) {
		hex_key2_size = strnlen(param->hex_key2, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
		if (hex_key2_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
			SPDK_ERRLOG("key2 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
			rc = -EINVAL;
			goto error;
		}
		key->param.hex_key2 = strdup(param->hex_key2);
		if (!key->param.hex_key2) {
			rc = -ENOMEM;
			goto error;
		}

		key->key2_size = hex_key2_size / 2;
		key->key2 = spdk_unhexlify(key->param.hex_key2);
		if (!key->key2) {
			SPDK_ERRLOG("Failed to unhexlify key2\n");
			rc = -EINVAL;
			goto error;
		}

		if (accel_aes_xts_keys_equal(key->key, key->key_size, key->key2, key->key2_size)) {
			SPDK_ERRLOG("Identical keys are not secure\n");
			rc = -EINVAL;
			goto error;
		}
	}

	key->module_if = module;

	spdk_spin_lock(&g_keyring_spin);
	if (_accel_crypto_key_get(param->key_name)) {
		rc = -EEXIST;
	} else {
		rc = module->crypto_key_init(key);
		if (!rc) {
			TAILQ_INSERT_TAIL(&g_keyring, key, link);
		}
	}
	spdk_spin_unlock(&g_keyring_spin);

	if (rc) {
		goto error;
	}

	return 0;

error:
	accel_crypto_key_free_mem(key);
	return rc;
}

int
spdk_accel_crypto_key_destroy(struct spdk_accel_crypto_key *key)
{
	if (!key || !key->module_if) {
		return -EINVAL;
	}

	spdk_spin_lock(&g_keyring_spin);
	if (!_accel_crypto_key_get(key->param.key_name)) {
		spdk_spin_unlock(&g_keyring_spin);
		return -ENOENT;
	}
	TAILQ_REMOVE(&g_keyring, key, link);
	spdk_spin_unlock(&g_keyring_spin);

	accel_crypto_key_destroy_unsafe(key);

	return 0;
}

struct spdk_accel_crypto_key *
spdk_accel_crypto_key_get(const char *name)
{
	struct spdk_accel_crypto_key *key;

	spdk_spin_lock(&g_keyring_spin);
	key = _accel_crypto_key_get(name);
	spdk_spin_unlock(&g_keyring_spin);

	return key;
}

/* Helper function called when accel modules register with the framework. */
void
spdk_accel_module_list_add(struct spdk_accel_module_if *accel_module)
{
	if (_module_find_by_name(accel_module->name)) {
		SPDK_NOTICELOG("Accel module %s already registered\n", accel_module->name);
		assert(false);
		return;
	}

	/* Make sure that the software module is at the head of the list.  This
	 * ensures that all opcodes are first assigned to software and then
	 * updated to HW modules as they are registered.
	 */
	if (strcmp(accel_module->name, "software") == 0) {
		TAILQ_INSERT_HEAD(&spdk_accel_module_list, accel_module, tailq);
	} else {
		TAILQ_INSERT_TAIL(&spdk_accel_module_list, accel_module, tailq);
	}

	if (accel_module->get_ctx_size && accel_module->get_ctx_size() > g_max_accel_module_size) {
		g_max_accel_module_size = accel_module->get_ctx_size();
	}
}

/* Framework level channel create callback. */
static int
accel_create_channel(void *io_device, void *ctx_buf)
{
	struct accel_io_channel	*accel_ch = ctx_buf;
	struct spdk_accel_task *accel_task;
	struct spdk_accel_sequence *seq;
	struct accel_buffer *buf;
	uint8_t *task_mem;
	int i = 0, j, rc;

	accel_ch->task_pool_base = calloc(MAX_TASKS_PER_CHANNEL, g_max_accel_module_size);
	if (accel_ch->task_pool_base == NULL) {
		return -ENOMEM;
	}

	accel_ch->seq_pool_base = calloc(MAX_TASKS_PER_CHANNEL, sizeof(struct spdk_accel_sequence));
	if (accel_ch->seq_pool_base == NULL) {
		goto err;
	}

	accel_ch->buf_pool_base = calloc(MAX_TASKS_PER_CHANNEL, sizeof(struct accel_buffer));
	if (accel_ch->buf_pool_base == NULL) {
		goto err;
	}

	TAILQ_INIT(&accel_ch->task_pool);
	TAILQ_INIT(&accel_ch->seq_pool);
	TAILQ_INIT(&accel_ch->buf_pool);
	task_mem = accel_ch->task_pool_base;
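	/* Carve the task pool out of one contiguous allocation; each slot is
	 * g_max_accel_module_size bytes, so any module's per-task context fits. */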
	for (i = 0; i < MAX_TASKS_PER_CHANNEL; i++) {
		accel_task = (struct spdk_accel_task *)task_mem;
		seq = &accel_ch->seq_pool_base[i];
		buf = &accel_ch->buf_pool_base[i];
		TAILQ_INSERT_TAIL(&accel_ch->task_pool, accel_task, link);
		TAILQ_INSERT_TAIL(&accel_ch->seq_pool, seq, link);
		TAILQ_INSERT_TAIL(&accel_ch->buf_pool, buf, link);
		task_mem += g_max_accel_module_size;
	}

	/* Assign modules and get IO channels for each */
	for (i = 0; i < ACCEL_OPC_LAST; i++) {
		accel_ch->module_ch[i] = g_modules_opc[i].module->get_io_channel();
		/* This can happen if idxd runs out of channels. */
		if (accel_ch->module_ch[i] == NULL) {
			goto err;
		}
	}

	rc = spdk_iobuf_channel_init(&accel_ch->iobuf, "accel", ACCEL_SMALL_CACHE_SIZE,
				     ACCEL_LARGE_CACHE_SIZE);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to initialize iobuf accel channel\n");
		goto err;
	}

	return 0;
err:
	for (j = 0; j < i; j++) {
		spdk_put_io_channel(accel_ch->module_ch[j]);
	}
	free(accel_ch->task_pool_base);
	free(accel_ch->seq_pool_base);
	free(accel_ch->buf_pool_base);
	return -ENOMEM;
}

/* Framework level channel destroy callback. */
static void
accel_destroy_channel(void *io_device, void *ctx_buf)
{
	struct accel_io_channel	*accel_ch = ctx_buf;
	int i;

	spdk_iobuf_channel_fini(&accel_ch->iobuf);

	for (i = 0; i < ACCEL_OPC_LAST; i++) {
		assert(accel_ch->module_ch[i] != NULL);
		spdk_put_io_channel(accel_ch->module_ch[i]);
		accel_ch->module_ch[i] = NULL;
	}

	free(accel_ch->task_pool_base);
	free(accel_ch->seq_pool_base);
	free(accel_ch->buf_pool_base);
}

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(&spdk_accel_module_list);
}

static void
accel_module_initialize(void)
{
	struct spdk_accel_module_if *accel_module;

	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		accel_module->module_init();
	}
}

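/* Record whether the module assigned to this opcode supports memory domains, i.e. whether
 * it reports at least one domain via get_memory_domains(). */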
static void
accel_module_init_opcode(enum accel_opcode opcode)
{
	struct accel_module *module = &g_modules_opc[opcode];
	struct spdk_accel_module_if *module_if = module->module;

	if (module_if->get_memory_domains != NULL) {
		module->supports_memory_domains = module_if->get_memory_domains(NULL, 0) > 0;
	}
}

int
spdk_accel_initialize(void)
{
	enum accel_opcode op;
	struct spdk_accel_module_if *accel_module = NULL;
	int rc;

	rc = spdk_memory_domain_create(&g_accel_domain, SPDK_DMA_DEVICE_TYPE_ACCEL, NULL,
				       "SPDK_ACCEL_DMA_DEVICE");
	if (rc != 0) {
		SPDK_ERRLOG("Failed to create accel memory domain\n");
		return rc;
	}

	spdk_spin_init(&g_keyring_spin);

	g_modules_started = true;
	accel_module_initialize();

	/* Create our global map of opcodes to modules.  We populate it starting with the
	 * software module (guaranteed to be first on the list) and then update opcodes with
	 * HW modules that have been initialized.
	 * NOTE: all opcodes must be supported by software in the event that no HW
	 * modules are initialized to support the operation.
	 */
	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		for (op = 0; op < ACCEL_OPC_LAST; op++) {
			if (accel_module->supports_opcode(op)) {
				g_modules_opc[op].module = accel_module;
				SPDK_DEBUGLOG(accel, "OPC 0x%x now assigned to %s\n", op, accel_module->name);
			}
		}
	}

	/* Now let's check for overrides and apply all that exist */
	for (op = 0; op < ACCEL_OPC_LAST; op++) {
		if (g_modules_opc_override[op] != NULL) {
			accel_module = _module_find_by_name(g_modules_opc_override[op]);
			if (accel_module == NULL) {
				SPDK_ERRLOG("Invalid module name of %s\n", g_modules_opc_override[op]);
				rc = -EINVAL;
				goto error;
			}
			if (accel_module->supports_opcode(op) == false) {
				SPDK_ERRLOG("Module %s does not support op code %d\n", accel_module->name, op);
				rc = -EINVAL;
				goto error;
			}
			g_modules_opc[op].module = accel_module;
		}
	}

	if (g_modules_opc[ACCEL_OPC_ENCRYPT].module != g_modules_opc[ACCEL_OPC_DECRYPT].module) {
		SPDK_ERRLOG("Different accel modules are assigned to encrypt and decrypt operations\n");
		rc = -EINVAL;
		goto error;
	}

	for (op = 0; op < ACCEL_OPC_LAST; op++) {
		assert(g_modules_opc[op].module != NULL);
		accel_module_init_opcode(op);
	}

	rc = spdk_iobuf_register_module("accel");
	if (rc != 0) {
		SPDK_ERRLOG("Failed to register accel iobuf module\n");
		goto error;
	}

	/*
	 * We need a unique identifier for the accel framework, so use the
	 * spdk_accel_module_list address for this purpose.
	 */
	spdk_io_device_register(&spdk_accel_module_list, accel_create_channel, accel_destroy_channel,
				sizeof(struct accel_io_channel), "accel");

	return 0;
error:
	spdk_memory_domain_destroy(g_accel_domain);

	return rc;
}

static void
accel_module_finish_cb(void)
{
	spdk_accel_fini_cb cb_fn = g_fini_cb_fn;

	spdk_memory_domain_destroy(g_accel_domain);

	cb_fn(g_fini_cb_arg);
	g_fini_cb_fn = NULL;
	g_fini_cb_arg = NULL;
}

static void
accel_write_overridden_opc(struct spdk_json_write_ctx *w, const char *opc_str,
			   const char *module_str)
{
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "accel_assign_opc");
	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_string(w, "opname", opc_str);
	spdk_json_write_named_string(w, "module", module_str);
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);
}

static void
__accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
{
	spdk_json_write_named_string(w, "name", key->param.key_name);
	spdk_json_write_named_string(w, "cipher", key->param.cipher);
	spdk_json_write_named_string(w, "key", key->param.hex_key);
	if (key->param.hex_key2) {
		spdk_json_write_named_string(w, "key2", key->param.hex_key2);
	}
}

void
_accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
{
	spdk_json_write_object_begin(w);
	__accel_crypto_key_dump_param(w, key);
	spdk_json_write_object_end(w);
}

static void
_accel_crypto_key_write_config_json(struct spdk_json_write_ctx *w,
				    struct spdk_accel_crypto_key *key)
{
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "accel_crypto_key_create");
	spdk_json_write_named_object_begin(w, "params");
	__accel_crypto_key_dump_param(w, key);
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);
}

static void
_accel_crypto_keys_write_config_json(struct spdk_json_write_ctx *w, bool full_dump)
{
	struct spdk_accel_crypto_key *key;

	spdk_spin_lock(&g_keyring_spin);
	TAILQ_FOREACH(key, &g_keyring, link) {
		if (full_dump) {
			_accel_crypto_key_write_config_json(w, key);
		} else {
			_accel_crypto_key_dump_param(w, key);
		}
	}
	spdk_spin_unlock(&g_keyring_spin);
}

void
_accel_crypto_keys_dump_param(struct spdk_json_write_ctx *w)
{
	_accel_crypto_keys_write_config_json(w, false);
}

void
spdk_accel_write_config_json(struct spdk_json_write_ctx *w)
{
	struct spdk_accel_module_if *accel_module;
	int i;

	/*
	 * The accel framework itself has no config; the modules, however, may have some.
	 */
	spdk_json_write_array_begin(w);
	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		if (accel_module->write_config_json) {
			accel_module->write_config_json(w);
		}
	}
	for (i = 0; i < ACCEL_OPC_LAST; i++) {
		if (g_modules_opc_override[i]) {
			accel_write_overridden_opc(w, g_opcode_strings[i], g_modules_opc_override[i]);
		}
	}

	_accel_crypto_keys_write_config_json(w, true);

	spdk_json_write_array_end(w);
}

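/*
 * Finish modules one at a time, starting from the head of the list.  Each module's
 * module_fini callback is expected to call spdk_accel_module_finish() again once it is
 * done, which advances to the next module; when the list is exhausted, the framework's
 * fini callback is invoked.
 */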
void
spdk_accel_module_finish(void)
{
	if (!g_accel_module) {
		g_accel_module = TAILQ_FIRST(&spdk_accel_module_list);
	} else {
		g_accel_module = TAILQ_NEXT(g_accel_module, tailq);
	}

	if (!g_accel_module) {
		spdk_spin_destroy(&g_keyring_spin);
		accel_module_finish_cb();
		return;
	}

	if (g_accel_module->module_fini) {
		spdk_thread_send_msg(spdk_get_thread(), g_accel_module->module_fini, NULL);
	} else {
		spdk_accel_module_finish();
	}
}

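/*
 * Tear down the framework: destroy all crypto keys, drop the opcode assignments and
 * overrides, unregister the io_device, and kick off module finishing.  cb_fn is invoked
 * once the last module has finished.
 */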
void
spdk_accel_finish(spdk_accel_fini_cb cb_fn, void *cb_arg)
{
	struct spdk_accel_crypto_key *key, *key_tmp;
	enum accel_opcode op;

	assert(cb_fn != NULL);

	g_fini_cb_fn = cb_fn;
	g_fini_cb_arg = cb_arg;

	spdk_spin_lock(&g_keyring_spin);
	TAILQ_FOREACH_SAFE(key, &g_keyring, link, key_tmp) {
		accel_crypto_key_destroy_unsafe(key);
	}
	spdk_spin_unlock(&g_keyring_spin);

	for (op = 0; op < ACCEL_OPC_LAST; op++) {
		if (g_modules_opc_override[op] != NULL) {
			free(g_modules_opc_override[op]);
			g_modules_opc_override[op] = NULL;
		}
		g_modules_opc[op].module = NULL;
	}

	spdk_io_device_unregister(&spdk_accel_module_list, NULL);
	spdk_accel_module_finish();
}

static struct spdk_accel_driver *
accel_find_driver(const char *name)
{
	struct spdk_accel_driver *driver;

	TAILQ_FOREACH(driver, &g_accel_drivers, tailq) {
		if (strcmp(driver->name, name) == 0) {
			return driver;
		}
	}

	return NULL;
}

int
spdk_accel_set_driver(const char *name)
{
	struct spdk_accel_driver *driver;

	driver = accel_find_driver(name);
	if (driver == NULL) {
		SPDK_ERRLOG("Couldn't find driver named '%s'\n", name);
		return -ENODEV;
	}

	g_accel_driver = driver;

	return 0;
}

void
spdk_accel_driver_register(struct spdk_accel_driver *driver)
{
	if (accel_find_driver(driver->name)) {
		SPDK_ERRLOG("Driver named '%s' has already been registered\n", driver->name);
		assert(0);
		return;
	}

	TAILQ_INSERT_TAIL(&g_accel_drivers, driver, tailq);
}

SPDK_LOG_REGISTER_COMPONENT(accel)