xref: /spdk/lib/accel/accel.c (revision 9b8579e4af16f0826736c09db858e0737f7556c5)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2020 Intel Corporation.
3  *   Copyright (c) 2022, 2023 NVIDIA CORPORATION & AFFILIATES.
4  *   All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "spdk/accel_module.h"
10 
11 #include "accel_internal.h"
12 
13 #include "spdk/dma.h"
14 #include "spdk/env.h"
15 #include "spdk/likely.h"
16 #include "spdk/log.h"
17 #include "spdk/thread.h"
18 #include "spdk/json.h"
19 #include "spdk/crc32.h"
20 #include "spdk/util.h"
21 #include "spdk/hexlify.h"
22 #include "spdk/string.h"
23 
24 /* Accelerator Framework: The following provides a top level
25  * generic API for the accelerator functions defined here. Modules,
26  * such as the one in /module/accel/ioat, supply the implementation
27  * with the exception of the pure software implementation contained
28  * later in this file.
29  */
30 
/* Alignment required for dualcast destination buffers (4 KiB). */
#define ALIGN_4K			0x1000
/* Default size for the per-channel task/sequence/buffer pools. */
#define MAX_TASKS_PER_CHANNEL		0x800
/* Default iobuf cache sizes (entries), overridable via g_opts. */
#define ACCEL_SMALL_CACHE_SIZE		128
#define ACCEL_LARGE_CACHE_SIZE		16
/* Set MSB, so we don't return NULL pointers as buffers */
#define ACCEL_BUFFER_BASE		((void *)(1ull << 63))
#define ACCEL_BUFFER_OFFSET_MASK	((uintptr_t)ACCEL_BUFFER_BASE - 1)

#define ACCEL_CRYPTO_TWEAK_MODE_DEFAULT	SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA
/* A registered module together with a cached flag telling whether it can
 * handle buffers described by memory domains (flag is filled in outside this
 * chunk - presumably at module start; confirm against module init code). */
struct accel_module {
	struct spdk_accel_module_if	*module;
	bool				supports_memory_domains;
};
45 
/* Largest context size for all accel modules */
static size_t g_max_accel_module_size = sizeof(struct spdk_accel_task);

static struct spdk_accel_module_if *g_accel_module = NULL;
/* Callback + argument invoked when framework finalization completes
 * (registered elsewhere in this file). */
static spdk_accel_fini_cb g_fini_cb_fn = NULL;
static void *g_fini_cb_arg = NULL;
/* Once true, opcode->module overrides are rejected
 * (see spdk_accel_assign_opc()). */
static bool g_modules_started = false;
static struct spdk_memory_domain *g_accel_domain;

/* Global list of registered accelerator modules */
static TAILQ_HEAD(, spdk_accel_module_if) spdk_accel_module_list =
	TAILQ_HEAD_INITIALIZER(spdk_accel_module_list);

/* Crypto keyring */
static TAILQ_HEAD(, spdk_accel_crypto_key) g_keyring = TAILQ_HEAD_INITIALIZER(g_keyring);
static struct spdk_spinlock g_keyring_spin;

/* Global array mapping capabilities to modules */
static struct accel_module g_modules_opc[SPDK_ACCEL_OPC_LAST] = {};
/* User-requested module name per opcode (strdup'd); validated after the
 * framework starts (see spdk_accel_assign_opc()). */
static char *g_modules_opc_override[SPDK_ACCEL_OPC_LAST] = {};
TAILQ_HEAD(, spdk_accel_driver) g_accel_drivers = TAILQ_HEAD_INITIALIZER(g_accel_drivers);
static struct spdk_accel_driver *g_accel_driver;
/* Tunable pool/cache sizes, defaulting to the constants above. */
static struct spdk_accel_opts g_opts = {
	.small_cache_size = ACCEL_SMALL_CACHE_SIZE,
	.large_cache_size = ACCEL_LARGE_CACHE_SIZE,
	.task_count = MAX_TASKS_PER_CHANNEL,
	.sequence_count = MAX_TASKS_PER_CHANNEL,
	.buf_count = MAX_TASKS_PER_CHANNEL,
};
/* Process-wide stats aggregate; presumably guarded by g_stats_lock -
 * confirm against the stats-accumulation code outside this chunk. */
static struct accel_stats g_stats;
static struct spdk_spinlock g_stats_lock;
77 
/* Human-readable opcode names, indexed directly by enum spdk_accel_opcode
 * (see spdk_accel_get_opcode_name()); order must match the enum. */
static const char *g_opcode_strings[SPDK_ACCEL_OPC_LAST] = {
	"copy", "fill", "dualcast", "compare", "crc32c", "copy_crc32c",
	"compress", "decompress", "encrypt", "decrypt", "xor",
	"dif_verify", "dif_generate", "dif_generate_copy"
};
83 
/* States of the sequence-processing state machine, stored in
 * spdk_accel_sequence::state (a uint8_t, so keep this enum small).
 * The AWAIT_* names appear to mark points where processing is suspended
 * until an asynchronous operation completes - the actual transitions live
 * outside this chunk. */
enum accel_sequence_state {
	ACCEL_SEQUENCE_STATE_INIT,
	ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF,
	ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF,
	ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF,
	ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF,
	ACCEL_SEQUENCE_STATE_PULL_DATA,
	ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA,
	ACCEL_SEQUENCE_STATE_EXEC_TASK,
	ACCEL_SEQUENCE_STATE_AWAIT_TASK,
	ACCEL_SEQUENCE_STATE_COMPLETE_TASK,
	ACCEL_SEQUENCE_STATE_NEXT_TASK,
	ACCEL_SEQUENCE_STATE_PUSH_DATA,
	ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA,
	ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS,
	ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS,
	ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS,
	ACCEL_SEQUENCE_STATE_ERROR,
	ACCEL_SEQUENCE_STATE_MAX,
};
104 
/* Debug names for each sequence state; only referenced from debug logging,
 * hence the unused attribute (kept even when SPDK_DEBUGLOG compiles out). */
static const char *g_seq_states[]
__attribute__((unused)) = {
	[ACCEL_SEQUENCE_STATE_INIT] = "init",
	[ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF] = "check-virtbuf",
	[ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF] = "await-virtbuf",
	[ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF] = "check-bouncebuf",
	[ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF] = "await-bouncebuf",
	[ACCEL_SEQUENCE_STATE_PULL_DATA] = "pull-data",
	[ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA] = "await-pull-data",
	[ACCEL_SEQUENCE_STATE_EXEC_TASK] = "exec-task",
	[ACCEL_SEQUENCE_STATE_AWAIT_TASK] = "await-task",
	[ACCEL_SEQUENCE_STATE_COMPLETE_TASK] = "complete-task",
	[ACCEL_SEQUENCE_STATE_NEXT_TASK] = "next-task",
	[ACCEL_SEQUENCE_STATE_PUSH_DATA] = "push-data",
	[ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA] = "await-push-data",
	[ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS] = "driver-exec-tasks",
	[ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS] = "driver-await-tasks",
	[ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS] = "driver-complete-tasks",
	[ACCEL_SEQUENCE_STATE_ERROR] = "error",
	[ACCEL_SEQUENCE_STATE_MAX] = "",
};

/* Map a state to its debug string, tolerating out-of-range values. */
#define ACCEL_SEQUENCE_STATE_STRING(s) \
	(((s) >= ACCEL_SEQUENCE_STATE_INIT && (s) < ACCEL_SEQUENCE_STATE_MAX) \
	 ? g_seq_states[s] : "unknown")
130 
/* A buffer descriptor handed out from the channel's buf_pool. The backing
 * memory (buf) is allocated lazily from the channel's iobuf cache; it is
 * NULL until obtained and returned via spdk_iobuf_put() in accel_put_buf(). */
struct accel_buffer {
	struct spdk_accel_sequence	*seq;
	void				*buf;	/* backing memory, NULL until allocated */
	uint64_t			len;	/* requested length in bytes */
	struct spdk_iobuf_entry		iobuf;
	spdk_accel_sequence_get_buf_cb	cb_fn;	/* invoked when a deferred allocation completes */
	void				*cb_ctx;
	SLIST_ENTRY(accel_buffer)	link;	/* ch->buf_pool or seq->bounce_bufs linkage */
	struct accel_io_channel		*ch;
};
141 
/* Per-thread accel channel: one module channel per opcode plus free pools
 * of tasks, sequences, buffer descriptors and task aux data. The *_base
 * pointers keep the backing allocations so they can be freed on channel
 * destroy (allocation/teardown happens outside this chunk). */
struct accel_io_channel {
	struct spdk_io_channel			*module_ch[SPDK_ACCEL_OPC_LAST];
	struct spdk_io_channel			*driver_channel;
	void					*task_pool_base;
	struct spdk_accel_sequence		*seq_pool_base;
	struct accel_buffer			*buf_pool_base;
	struct spdk_accel_task_aux_data		*task_aux_data_base;
	STAILQ_HEAD(, spdk_accel_task)		task_pool;
	SLIST_HEAD(, spdk_accel_task_aux_data)	task_aux_data_pool;
	SLIST_HEAD(, spdk_accel_sequence)	seq_pool;
	SLIST_HEAD(, accel_buffer)		buf_pool;
	struct spdk_iobuf_channel		iobuf;
	struct accel_stats			stats;	/* per-channel counters (see accel_update_stats) */
};
156 
TAILQ_HEAD(accel_sequence_tasks, spdk_accel_task);

/* A chain of accel tasks executed as one unit. The static assert below pins
 * the size to exactly 64 bytes - presumably to keep the pooled objects
 * cache-line sized; the assert itself only enforces the size. */
struct spdk_accel_sequence {
	struct accel_io_channel			*ch;
	struct accel_sequence_tasks		tasks;
	SLIST_HEAD(, accel_buffer)		bounce_bufs;	/* buffers owned by this sequence */
	int					status;		/* first failure code, 0 if none */
	/* state uses enum accel_sequence_state */
	uint8_t					state;
	bool					in_process_sequence;	/* re-entrancy guard */
	spdk_accel_completion_cb		cb_fn;
	void					*cb_arg;
	SLIST_ENTRY(spdk_accel_sequence)	link;	/* ch->seq_pool linkage */
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_accel_sequence) == 64, "invalid size");

/* Bump a per-channel stat counter by v. */
#define accel_update_stats(ch, event, v) \
	do { \
		(ch)->stats.event += (v); \
	} while (0)

/* Bump a per-opcode stat counter for the given task. */
#define accel_update_task_stats(ch, task, event, v) \
	accel_update_stats(ch, operations[(task)->op_code].event, v)
180 
181 static inline void accel_sequence_task_cb(void *cb_arg, int status);
182 
/* Transition a sequence to a new state, logging the change. Once a sequence
 * has entered ERROR it must stay there - the assert enforces that no
 * transition leaves the terminal error state. */
static inline void
accel_sequence_set_state(struct spdk_accel_sequence *seq, enum accel_sequence_state state)
{
	SPDK_DEBUGLOG(accel, "seq=%p, setting state: %s -> %s\n", seq,
		      ACCEL_SEQUENCE_STATE_STRING(seq->state), ACCEL_SEQUENCE_STATE_STRING(state));
	assert(seq->state != ACCEL_SEQUENCE_STATE_ERROR || state == ACCEL_SEQUENCE_STATE_ERROR);
	seq->state = state;
}
191 
192 static void
193 accel_sequence_set_fail(struct spdk_accel_sequence *seq, int status)
194 {
195 	accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
196 	assert(status != 0);
197 	seq->status = status;
198 }
199 
200 int
201 spdk_accel_get_opc_module_name(enum spdk_accel_opcode opcode, const char **module_name)
202 {
203 	if (opcode >= SPDK_ACCEL_OPC_LAST) {
204 		/* invalid opcode */
205 		return -EINVAL;
206 	}
207 
208 	if (g_modules_opc[opcode].module) {
209 		*module_name = g_modules_opc[opcode].module->name;
210 	} else {
211 		return -ENOENT;
212 	}
213 
214 	return 0;
215 }
216 
217 void
218 _accel_for_each_module(struct module_info *info, _accel_for_each_module_fn fn)
219 {
220 	struct spdk_accel_module_if *accel_module;
221 	enum spdk_accel_opcode opcode;
222 	int j = 0;
223 
224 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
225 		for (opcode = 0; opcode < SPDK_ACCEL_OPC_LAST; opcode++) {
226 			if (accel_module->supports_opcode(opcode)) {
227 				info->ops[j] = opcode;
228 				j++;
229 			}
230 		}
231 		info->name = accel_module->name;
232 		info->num_ops = j;
233 		fn(info);
234 		j = 0;
235 	}
236 }
237 
238 const char *
239 spdk_accel_get_opcode_name(enum spdk_accel_opcode opcode)
240 {
241 	if (opcode < SPDK_ACCEL_OPC_LAST) {
242 		return g_opcode_strings[opcode];
243 	}
244 
245 	return NULL;
246 }
247 
248 int
249 spdk_accel_assign_opc(enum spdk_accel_opcode opcode, const char *name)
250 {
251 	char *copy;
252 
253 	if (g_modules_started == true) {
254 		/* we don't allow re-assignment once things have started */
255 		return -EINVAL;
256 	}
257 
258 	if (opcode >= SPDK_ACCEL_OPC_LAST) {
259 		/* invalid opcode */
260 		return -EINVAL;
261 	}
262 
263 	copy = strdup(name);
264 	if (copy == NULL) {
265 		return -ENOMEM;
266 	}
267 
268 	/* module selection will be validated after the framework starts. */
269 	free(g_modules_opc_override[opcode]);
270 	g_modules_opc_override[opcode] = copy;
271 
272 	return 0;
273 }
274 
/* Complete a task: account stats, then either hand the task to its owning
 * sequence's state machine, or recycle the task and invoke the user
 * callback. NOTE: statement order below is load-bearing - the task is
 * returned to the pool BEFORE cb_fn runs. */
void
spdk_accel_task_complete(struct spdk_accel_task *accel_task, int status)
{
	struct accel_io_channel		*accel_ch = accel_task->accel_ch;
	spdk_accel_completion_cb	cb_fn;
	void				*cb_arg;

	accel_update_task_stats(accel_ch, accel_task, executed, 1);
	accel_update_task_stats(accel_ch, accel_task, num_bytes, accel_task->nbytes);
	if (spdk_unlikely(status != 0)) {
		accel_update_task_stats(accel_ch, accel_task, failed, 1);
	}

	/* Tasks belonging to a sequence are completed through the sequence
	 * state machine rather than the user callback. */
	if (accel_task->seq) {
		accel_sequence_task_cb(accel_task->seq, status);
		return;
	}

	/* Cache the callback; the task fields may be reused once pooled. */
	cb_fn = accel_task->cb_fn;
	cb_arg = accel_task->cb_arg;

	/* Return aux data to its pool before recycling the task itself. */
	if (accel_task->has_aux) {
		SLIST_INSERT_HEAD(&accel_ch->task_aux_data_pool, accel_task->aux, link);
		accel_task->aux = NULL;
		accel_task->has_aux = false;
	}

	/* We should put the accel_task into the list firstly in order to avoid
	 * the accel task list is exhausted when there is recursive call to
	 * allocate accel_task in user's call back function (cb_fn)
	 */
	STAILQ_INSERT_HEAD(&accel_ch->task_pool, accel_task, link);

	cb_fn(cb_arg, status);
}
310 
311 inline static struct spdk_accel_task *
312 _get_task(struct accel_io_channel *accel_ch, spdk_accel_completion_cb cb_fn, void *cb_arg)
313 {
314 	struct spdk_accel_task *accel_task;
315 
316 	accel_task = STAILQ_FIRST(&accel_ch->task_pool);
317 	if (spdk_unlikely(accel_task == NULL)) {
318 		accel_update_stats(accel_ch, retry.task, 1);
319 		return NULL;
320 	}
321 
322 	STAILQ_REMOVE_HEAD(&accel_ch->task_pool, link);
323 	accel_task->link.stqe_next = NULL;
324 
325 	accel_task->cb_fn = cb_fn;
326 	accel_task->cb_arg = cb_arg;
327 	accel_task->accel_ch = accel_ch;
328 	accel_task->s.iovs = NULL;
329 	accel_task->d.iovs = NULL;
330 
331 	return accel_task;
332 }
333 
334 static inline int
335 accel_submit_task(struct accel_io_channel *accel_ch, struct spdk_accel_task *task)
336 {
337 	struct spdk_io_channel *module_ch = accel_ch->module_ch[task->op_code];
338 	struct spdk_accel_module_if *module = g_modules_opc[task->op_code].module;
339 	int rc;
340 
341 	rc = module->submit_tasks(module_ch, task);
342 	if (spdk_unlikely(rc != 0)) {
343 		accel_update_task_stats(accel_ch, task, failed, 1);
344 	}
345 
346 	return rc;
347 }
348 
/* Total byte length covered by the first iovcnt entries of iovs. */
static inline uint64_t
accel_get_iovlen(struct iovec *iovs, uint32_t iovcnt)
{
	struct iovec *iov = iovs;
	struct iovec *end = iovs + iovcnt;
	uint64_t total = 0;

	while (iov < end) {
		total += iov->iov_len;
		iov++;
	}

	return total;
}
361 
/* Grab an aux-data entry from the channel pool and attach it to the task.
 * CAUTION: this macro expands a `return -ENOMEM` - on pool exhaustion it
 * returns the task to its pool and returns from the ENCLOSING function.
 * The error log and assert indicate exhaustion here is considered a fatal
 * bug rather than a normal retry condition. */
#define ACCEL_TASK_ALLOC_AUX_BUF(task)						\
do {										\
        (task)->aux = SLIST_FIRST(&(task)->accel_ch->task_aux_data_pool);	\
        if (spdk_unlikely(!(task)->aux)) {					\
                SPDK_ERRLOG("Fatal problem, aux data was not allocated\n");	\
                STAILQ_INSERT_HEAD(&(task)->accel_ch->task_pool, (task), link);	\
                assert(0);							\
                return -ENOMEM;							\
        }									\
        SLIST_REMOVE_HEAD(&(task)->accel_ch->task_aux_data_pool, link);		\
        (task)->has_aux = true;							\
} while (0)
374 
375 /* Accel framework public API for copy function */
376 int
377 spdk_accel_submit_copy(struct spdk_io_channel *ch, void *dst, void *src,
378 		       uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
379 {
380 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
381 	struct spdk_accel_task *accel_task;
382 
383 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
384 	if (spdk_unlikely(accel_task == NULL)) {
385 		return -ENOMEM;
386 	}
387 
388 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
389 
390 	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
391 	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
392 	accel_task->d.iovs[0].iov_base = dst;
393 	accel_task->d.iovs[0].iov_len = nbytes;
394 	accel_task->d.iovcnt = 1;
395 	accel_task->s.iovs[0].iov_base = src;
396 	accel_task->s.iovs[0].iov_len = nbytes;
397 	accel_task->s.iovcnt = 1;
398 	accel_task->nbytes = nbytes;
399 	accel_task->op_code = SPDK_ACCEL_OPC_COPY;
400 	accel_task->src_domain = NULL;
401 	accel_task->dst_domain = NULL;
402 
403 	return accel_submit_task(accel_ch, accel_task);
404 }
405 
406 /* Accel framework public API for dual cast copy function */
407 int
408 spdk_accel_submit_dualcast(struct spdk_io_channel *ch, void *dst1,
409 			   void *dst2, void *src, uint64_t nbytes,
410 			   spdk_accel_completion_cb cb_fn, void *cb_arg)
411 {
412 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
413 	struct spdk_accel_task *accel_task;
414 
415 	if ((uintptr_t)dst1 & (ALIGN_4K - 1) || (uintptr_t)dst2 & (ALIGN_4K - 1)) {
416 		SPDK_ERRLOG("Dualcast requires 4K alignment on dst addresses\n");
417 		return -EINVAL;
418 	}
419 
420 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
421 	if (spdk_unlikely(accel_task == NULL)) {
422 		return -ENOMEM;
423 	}
424 
425 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
426 
427 	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
428 	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
429 	accel_task->d2.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST2];
430 	accel_task->d.iovs[0].iov_base = dst1;
431 	accel_task->d.iovs[0].iov_len = nbytes;
432 	accel_task->d.iovcnt = 1;
433 	accel_task->d2.iovs[0].iov_base = dst2;
434 	accel_task->d2.iovs[0].iov_len = nbytes;
435 	accel_task->d2.iovcnt = 1;
436 	accel_task->s.iovs[0].iov_base = src;
437 	accel_task->s.iovs[0].iov_len = nbytes;
438 	accel_task->s.iovcnt = 1;
439 	accel_task->nbytes = nbytes;
440 	accel_task->op_code = SPDK_ACCEL_OPC_DUALCAST;
441 	accel_task->src_domain = NULL;
442 	accel_task->dst_domain = NULL;
443 
444 	return accel_submit_task(accel_ch, accel_task);
445 }
446 
447 /* Accel framework public API for compare function */
448 
449 int
450 spdk_accel_submit_compare(struct spdk_io_channel *ch, void *src1,
451 			  void *src2, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
452 			  void *cb_arg)
453 {
454 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
455 	struct spdk_accel_task *accel_task;
456 
457 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
458 	if (spdk_unlikely(accel_task == NULL)) {
459 		return -ENOMEM;
460 	}
461 
462 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
463 
464 	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
465 	accel_task->s2.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC2];
466 	accel_task->s.iovs[0].iov_base = src1;
467 	accel_task->s.iovs[0].iov_len = nbytes;
468 	accel_task->s.iovcnt = 1;
469 	accel_task->s2.iovs[0].iov_base = src2;
470 	accel_task->s2.iovs[0].iov_len = nbytes;
471 	accel_task->s2.iovcnt = 1;
472 	accel_task->nbytes = nbytes;
473 	accel_task->op_code = SPDK_ACCEL_OPC_COMPARE;
474 	accel_task->src_domain = NULL;
475 	accel_task->dst_domain = NULL;
476 
477 	return accel_submit_task(accel_ch, accel_task);
478 }
479 
480 /* Accel framework public API for fill function */
481 int
482 spdk_accel_submit_fill(struct spdk_io_channel *ch, void *dst,
483 		       uint8_t fill, uint64_t nbytes,
484 		       spdk_accel_completion_cb cb_fn, void *cb_arg)
485 {
486 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
487 	struct spdk_accel_task *accel_task;
488 
489 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
490 	if (spdk_unlikely(accel_task == NULL)) {
491 		return -ENOMEM;
492 	}
493 
494 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
495 
496 	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
497 	accel_task->d.iovs[0].iov_base = dst;
498 	accel_task->d.iovs[0].iov_len = nbytes;
499 	accel_task->d.iovcnt = 1;
500 	accel_task->nbytes = nbytes;
501 	memset(&accel_task->fill_pattern, fill, sizeof(uint64_t));
502 	accel_task->op_code = SPDK_ACCEL_OPC_FILL;
503 	accel_task->src_domain = NULL;
504 	accel_task->dst_domain = NULL;
505 
506 	return accel_submit_task(accel_ch, accel_task);
507 }
508 
509 /* Accel framework public API for CRC-32C function */
510 int
511 spdk_accel_submit_crc32c(struct spdk_io_channel *ch, uint32_t *crc_dst,
512 			 void *src, uint32_t seed, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
513 			 void *cb_arg)
514 {
515 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
516 	struct spdk_accel_task *accel_task;
517 
518 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
519 	if (spdk_unlikely(accel_task == NULL)) {
520 		return -ENOMEM;
521 	}
522 
523 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
524 
525 	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
526 	accel_task->s.iovs[0].iov_base = src;
527 	accel_task->s.iovs[0].iov_len = nbytes;
528 	accel_task->s.iovcnt = 1;
529 	accel_task->nbytes = nbytes;
530 	accel_task->crc_dst = crc_dst;
531 	accel_task->seed = seed;
532 	accel_task->op_code = SPDK_ACCEL_OPC_CRC32C;
533 	accel_task->src_domain = NULL;
534 	accel_task->dst_domain = NULL;
535 
536 	return accel_submit_task(accel_ch, accel_task);
537 }
538 
539 /* Accel framework public API for chained CRC-32C function */
540 int
541 spdk_accel_submit_crc32cv(struct spdk_io_channel *ch, uint32_t *crc_dst,
542 			  struct iovec *iov, uint32_t iov_cnt, uint32_t seed,
543 			  spdk_accel_completion_cb cb_fn, void *cb_arg)
544 {
545 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
546 	struct spdk_accel_task *accel_task;
547 
548 	if (iov == NULL) {
549 		SPDK_ERRLOG("iov should not be NULL");
550 		return -EINVAL;
551 	}
552 
553 	if (!iov_cnt) {
554 		SPDK_ERRLOG("iovcnt should not be zero value\n");
555 		return -EINVAL;
556 	}
557 
558 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
559 	if (spdk_unlikely(accel_task == NULL)) {
560 		SPDK_ERRLOG("no memory\n");
561 		assert(0);
562 		return -ENOMEM;
563 	}
564 
565 	accel_task->s.iovs = iov;
566 	accel_task->s.iovcnt = iov_cnt;
567 	accel_task->nbytes = accel_get_iovlen(iov, iov_cnt);
568 	accel_task->crc_dst = crc_dst;
569 	accel_task->seed = seed;
570 	accel_task->op_code = SPDK_ACCEL_OPC_CRC32C;
571 	accel_task->src_domain = NULL;
572 	accel_task->dst_domain = NULL;
573 
574 	return accel_submit_task(accel_ch, accel_task);
575 }
576 
577 /* Accel framework public API for copy with CRC-32C function */
578 int
579 spdk_accel_submit_copy_crc32c(struct spdk_io_channel *ch, void *dst,
580 			      void *src, uint32_t *crc_dst, uint32_t seed, uint64_t nbytes,
581 			      spdk_accel_completion_cb cb_fn, void *cb_arg)
582 {
583 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
584 	struct spdk_accel_task *accel_task;
585 
586 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
587 	if (spdk_unlikely(accel_task == NULL)) {
588 		return -ENOMEM;
589 	}
590 
591 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
592 
593 	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
594 	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
595 	accel_task->d.iovs[0].iov_base = dst;
596 	accel_task->d.iovs[0].iov_len = nbytes;
597 	accel_task->d.iovcnt = 1;
598 	accel_task->s.iovs[0].iov_base = src;
599 	accel_task->s.iovs[0].iov_len = nbytes;
600 	accel_task->s.iovcnt = 1;
601 	accel_task->nbytes = nbytes;
602 	accel_task->crc_dst = crc_dst;
603 	accel_task->seed = seed;
604 	accel_task->op_code = SPDK_ACCEL_OPC_COPY_CRC32C;
605 	accel_task->src_domain = NULL;
606 	accel_task->dst_domain = NULL;
607 
608 	return accel_submit_task(accel_ch, accel_task);
609 }
610 
611 /* Accel framework public API for chained copy + CRC-32C function */
612 int
613 spdk_accel_submit_copy_crc32cv(struct spdk_io_channel *ch, void *dst,
614 			       struct iovec *src_iovs, uint32_t iov_cnt, uint32_t *crc_dst,
615 			       uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg)
616 {
617 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
618 	struct spdk_accel_task *accel_task;
619 	uint64_t nbytes;
620 
621 	if (src_iovs == NULL) {
622 		SPDK_ERRLOG("iov should not be NULL");
623 		return -EINVAL;
624 	}
625 
626 	if (!iov_cnt) {
627 		SPDK_ERRLOG("iovcnt should not be zero value\n");
628 		return -EINVAL;
629 	}
630 
631 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
632 	if (spdk_unlikely(accel_task == NULL)) {
633 		SPDK_ERRLOG("no memory\n");
634 		assert(0);
635 		return -ENOMEM;
636 	}
637 
638 	nbytes = accel_get_iovlen(src_iovs, iov_cnt);
639 
640 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
641 
642 	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
643 	accel_task->d.iovs[0].iov_base = dst;
644 	accel_task->d.iovs[0].iov_len = nbytes;
645 	accel_task->d.iovcnt = 1;
646 	accel_task->s.iovs = src_iovs;
647 	accel_task->s.iovcnt = iov_cnt;
648 	accel_task->nbytes = nbytes;
649 	accel_task->crc_dst = crc_dst;
650 	accel_task->seed = seed;
651 	accel_task->op_code = SPDK_ACCEL_OPC_COPY_CRC32C;
652 	accel_task->src_domain = NULL;
653 	accel_task->dst_domain = NULL;
654 
655 	return accel_submit_task(accel_ch, accel_task);
656 }
657 
658 int
659 spdk_accel_submit_compress(struct spdk_io_channel *ch, void *dst, uint64_t nbytes,
660 			   struct iovec *src_iovs, size_t src_iovcnt, uint32_t *output_size,
661 			   spdk_accel_completion_cb cb_fn, void *cb_arg)
662 {
663 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
664 	struct spdk_accel_task *accel_task;
665 
666 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
667 	if (spdk_unlikely(accel_task == NULL)) {
668 		return -ENOMEM;
669 	}
670 
671 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
672 
673 	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
674 	accel_task->d.iovs[0].iov_base = dst;
675 	accel_task->d.iovs[0].iov_len = nbytes;
676 	accel_task->d.iovcnt = 1;
677 	accel_task->output_size = output_size;
678 	accel_task->s.iovs = src_iovs;
679 	accel_task->s.iovcnt = src_iovcnt;
680 	accel_task->nbytes = nbytes;
681 	accel_task->op_code = SPDK_ACCEL_OPC_COMPRESS;
682 	accel_task->src_domain = NULL;
683 	accel_task->dst_domain = NULL;
684 
685 	return accel_submit_task(accel_ch, accel_task);
686 }
687 
688 int
689 spdk_accel_submit_decompress(struct spdk_io_channel *ch, struct iovec *dst_iovs,
690 			     size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
691 			     uint32_t *output_size, spdk_accel_completion_cb cb_fn,
692 			     void *cb_arg)
693 {
694 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
695 	struct spdk_accel_task *accel_task;
696 
697 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
698 	if (spdk_unlikely(accel_task == NULL)) {
699 		return -ENOMEM;
700 	}
701 
702 	accel_task->output_size = output_size;
703 	accel_task->s.iovs = src_iovs;
704 	accel_task->s.iovcnt = src_iovcnt;
705 	accel_task->d.iovs = dst_iovs;
706 	accel_task->d.iovcnt = dst_iovcnt;
707 	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
708 	accel_task->op_code = SPDK_ACCEL_OPC_DECOMPRESS;
709 	accel_task->src_domain = NULL;
710 	accel_task->dst_domain = NULL;
711 
712 	return accel_submit_task(accel_ch, accel_task);
713 }
714 
715 int
716 spdk_accel_submit_encrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
717 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
718 			  struct iovec *src_iovs, uint32_t src_iovcnt,
719 			  uint64_t iv, uint32_t block_size,
720 			  spdk_accel_completion_cb cb_fn, void *cb_arg)
721 {
722 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
723 	struct spdk_accel_task *accel_task;
724 
725 	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
726 		return -EINVAL;
727 	}
728 
729 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
730 	if (spdk_unlikely(accel_task == NULL)) {
731 		return -ENOMEM;
732 	}
733 
734 	accel_task->crypto_key = key;
735 	accel_task->s.iovs = src_iovs;
736 	accel_task->s.iovcnt = src_iovcnt;
737 	accel_task->d.iovs = dst_iovs;
738 	accel_task->d.iovcnt = dst_iovcnt;
739 	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
740 	accel_task->iv = iv;
741 	accel_task->block_size = block_size;
742 	accel_task->op_code = SPDK_ACCEL_OPC_ENCRYPT;
743 	accel_task->src_domain = NULL;
744 	accel_task->dst_domain = NULL;
745 
746 	return accel_submit_task(accel_ch, accel_task);
747 }
748 
749 int
750 spdk_accel_submit_decrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
751 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
752 			  struct iovec *src_iovs, uint32_t src_iovcnt,
753 			  uint64_t iv, uint32_t block_size,
754 			  spdk_accel_completion_cb cb_fn, void *cb_arg)
755 {
756 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
757 	struct spdk_accel_task *accel_task;
758 
759 	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
760 		return -EINVAL;
761 	}
762 
763 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
764 	if (spdk_unlikely(accel_task == NULL)) {
765 		return -ENOMEM;
766 	}
767 
768 	accel_task->crypto_key = key;
769 	accel_task->s.iovs = src_iovs;
770 	accel_task->s.iovcnt = src_iovcnt;
771 	accel_task->d.iovs = dst_iovs;
772 	accel_task->d.iovcnt = dst_iovcnt;
773 	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
774 	accel_task->iv = iv;
775 	accel_task->block_size = block_size;
776 	accel_task->op_code = SPDK_ACCEL_OPC_DECRYPT;
777 	accel_task->src_domain = NULL;
778 	accel_task->dst_domain = NULL;
779 
780 	return accel_submit_task(accel_ch, accel_task);
781 }
782 
783 int
784 spdk_accel_submit_xor(struct spdk_io_channel *ch, void *dst, void **sources, uint32_t nsrcs,
785 		      uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
786 {
787 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
788 	struct spdk_accel_task *accel_task;
789 
790 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
791 	if (spdk_unlikely(accel_task == NULL)) {
792 		return -ENOMEM;
793 	}
794 
795 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
796 
797 	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
798 	accel_task->nsrcs.srcs = sources;
799 	accel_task->nsrcs.cnt = nsrcs;
800 	accel_task->d.iovs[0].iov_base = dst;
801 	accel_task->d.iovs[0].iov_len = nbytes;
802 	accel_task->d.iovcnt = 1;
803 	accel_task->nbytes = nbytes;
804 	accel_task->op_code = SPDK_ACCEL_OPC_XOR;
805 	accel_task->src_domain = NULL;
806 	accel_task->dst_domain = NULL;
807 
808 	return accel_submit_task(accel_ch, accel_task);
809 }
810 
811 int
812 spdk_accel_submit_dif_verify(struct spdk_io_channel *ch,
813 			     struct iovec *iovs, size_t iovcnt, uint32_t num_blocks,
814 			     const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err,
815 			     spdk_accel_completion_cb cb_fn, void *cb_arg)
816 {
817 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
818 	struct spdk_accel_task *accel_task;
819 
820 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
821 	if (accel_task == NULL) {
822 		return -ENOMEM;
823 	}
824 
825 	accel_task->s.iovs = iovs;
826 	accel_task->s.iovcnt = iovcnt;
827 	accel_task->dif.ctx = ctx;
828 	accel_task->dif.err = err;
829 	accel_task->dif.num_blocks = num_blocks;
830 	accel_task->nbytes = num_blocks * ctx->block_size;
831 	accel_task->op_code = SPDK_ACCEL_OPC_DIF_VERIFY;
832 	accel_task->src_domain = NULL;
833 	accel_task->dst_domain = NULL;
834 
835 	return accel_submit_task(accel_ch, accel_task);
836 }
837 
838 int
839 spdk_accel_submit_dif_generate(struct spdk_io_channel *ch,
840 			       struct iovec *iovs, size_t iovcnt, uint32_t num_blocks,
841 			       const struct spdk_dif_ctx *ctx,
842 			       spdk_accel_completion_cb cb_fn, void *cb_arg)
843 {
844 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
845 	struct spdk_accel_task *accel_task;
846 
847 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
848 	if (accel_task == NULL) {
849 		return -ENOMEM;
850 	}
851 
852 	accel_task->s.iovs = iovs;
853 	accel_task->s.iovcnt = iovcnt;
854 	accel_task->dif.ctx = ctx;
855 	accel_task->dif.num_blocks = num_blocks;
856 	accel_task->nbytes = num_blocks * ctx->block_size;
857 	accel_task->op_code = SPDK_ACCEL_OPC_DIF_GENERATE;
858 	accel_task->src_domain = NULL;
859 	accel_task->dst_domain = NULL;
860 
861 	return accel_submit_task(accel_ch, accel_task);
862 }
863 
864 int
865 spdk_accel_submit_dif_generate_copy(struct spdk_io_channel *ch, struct iovec *dst_iovs,
866 				    size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
867 				    uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
868 				    spdk_accel_completion_cb cb_fn, void *cb_arg)
869 {
870 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
871 	struct spdk_accel_task *accel_task;
872 
873 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
874 	if (accel_task == NULL) {
875 		return -ENOMEM;
876 	}
877 
878 	accel_task->s.iovs = src_iovs;
879 	accel_task->s.iovcnt = src_iovcnt;
880 	accel_task->d.iovs = dst_iovs;
881 	accel_task->d.iovcnt = dst_iovcnt;
882 	accel_task->dif.ctx = ctx;
883 	accel_task->dif.num_blocks = num_blocks;
884 	accel_task->nbytes = num_blocks * ctx->block_size;
885 	accel_task->op_code = SPDK_ACCEL_OPC_DIF_GENERATE_COPY;
886 	accel_task->src_domain = NULL;
887 	accel_task->dst_domain = NULL;
888 
889 	return accel_submit_task(accel_ch, accel_task);
890 }
891 
892 static inline struct accel_buffer *
893 accel_get_buf(struct accel_io_channel *ch, uint64_t len)
894 {
895 	struct accel_buffer *buf;
896 
897 	buf = SLIST_FIRST(&ch->buf_pool);
898 	if (spdk_unlikely(buf == NULL)) {
899 		accel_update_stats(ch, retry.bufdesc, 1);
900 		return NULL;
901 	}
902 
903 	SLIST_REMOVE_HEAD(&ch->buf_pool, link);
904 	buf->len = len;
905 	buf->buf = NULL;
906 	buf->seq = NULL;
907 	buf->cb_fn = NULL;
908 
909 	return buf;
910 }
911 
912 static inline void
913 accel_put_buf(struct accel_io_channel *ch, struct accel_buffer *buf)
914 {
915 	if (buf->buf != NULL) {
916 		spdk_iobuf_put(&ch->iobuf, buf->buf, buf->len);
917 	}
918 
919 	SLIST_INSERT_HEAD(&ch->buf_pool, buf, link);
920 }
921 
922 static inline struct spdk_accel_sequence *
923 accel_sequence_get(struct accel_io_channel *ch)
924 {
925 	struct spdk_accel_sequence *seq;
926 
927 	seq = SLIST_FIRST(&ch->seq_pool);
928 	if (spdk_unlikely(seq == NULL)) {
929 		accel_update_stats(ch, retry.sequence, 1);
930 		return NULL;
931 	}
932 
933 	SLIST_REMOVE_HEAD(&ch->seq_pool, link);
934 
935 	TAILQ_INIT(&seq->tasks);
936 	SLIST_INIT(&seq->bounce_bufs);
937 
938 	seq->ch = ch;
939 	seq->status = 0;
940 	seq->state = ACCEL_SEQUENCE_STATE_INIT;
941 	seq->in_process_sequence = false;
942 
943 	return seq;
944 }
945 
946 static inline void
947 accel_sequence_put(struct spdk_accel_sequence *seq)
948 {
949 	struct accel_io_channel *ch = seq->ch;
950 	struct accel_buffer *buf;
951 
952 	while (!SLIST_EMPTY(&seq->bounce_bufs)) {
953 		buf = SLIST_FIRST(&seq->bounce_bufs);
954 		SLIST_REMOVE_HEAD(&seq->bounce_bufs, link);
955 		accel_put_buf(seq->ch, buf);
956 	}
957 
958 	assert(TAILQ_EMPTY(&seq->tasks));
959 	seq->ch = NULL;
960 
961 	SLIST_INSERT_HEAD(&ch->seq_pool, seq, link);
962 }
963 
964 static void accel_sequence_task_cb(void *cb_arg, int status);
965 
966 static inline struct spdk_accel_task *
967 accel_sequence_get_task(struct accel_io_channel *ch, struct spdk_accel_sequence *seq,
968 			spdk_accel_step_cb cb_fn, void *cb_arg)
969 {
970 	struct spdk_accel_task *task;
971 
972 	task = _get_task(ch, NULL, NULL);
973 	if (spdk_unlikely(task == NULL)) {
974 		return task;
975 	}
976 
977 	task->step_cb_fn = cb_fn;
978 	task->cb_arg = cb_arg;
979 	task->seq = seq;
980 
981 	return task;
982 }
983 
984 int
985 spdk_accel_append_copy(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
986 		       struct iovec *dst_iovs, uint32_t dst_iovcnt,
987 		       struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
988 		       struct iovec *src_iovs, uint32_t src_iovcnt,
989 		       struct spdk_memory_domain *src_domain, void *src_domain_ctx,
990 		       spdk_accel_step_cb cb_fn, void *cb_arg)
991 {
992 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
993 	struct spdk_accel_task *task;
994 	struct spdk_accel_sequence *seq = *pseq;
995 
996 	if (seq == NULL) {
997 		seq = accel_sequence_get(accel_ch);
998 		if (spdk_unlikely(seq == NULL)) {
999 			return -ENOMEM;
1000 		}
1001 	}
1002 
1003 	assert(seq->ch == accel_ch);
1004 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1005 	if (spdk_unlikely(task == NULL)) {
1006 		if (*pseq == NULL) {
1007 			accel_sequence_put(seq);
1008 		}
1009 
1010 		return -ENOMEM;
1011 	}
1012 
1013 	task->dst_domain = dst_domain;
1014 	task->dst_domain_ctx = dst_domain_ctx;
1015 	task->d.iovs = dst_iovs;
1016 	task->d.iovcnt = dst_iovcnt;
1017 	task->src_domain = src_domain;
1018 	task->src_domain_ctx = src_domain_ctx;
1019 	task->s.iovs = src_iovs;
1020 	task->s.iovcnt = src_iovcnt;
1021 	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
1022 	task->op_code = SPDK_ACCEL_OPC_COPY;
1023 
1024 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1025 	*pseq = seq;
1026 
1027 	return 0;
1028 }
1029 
1030 int
1031 spdk_accel_append_fill(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1032 		       void *buf, uint64_t len,
1033 		       struct spdk_memory_domain *domain, void *domain_ctx, uint8_t pattern,
1034 		       spdk_accel_step_cb cb_fn, void *cb_arg)
1035 {
1036 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1037 	struct spdk_accel_task *task;
1038 	struct spdk_accel_sequence *seq = *pseq;
1039 
1040 	if (seq == NULL) {
1041 		seq = accel_sequence_get(accel_ch);
1042 		if (spdk_unlikely(seq == NULL)) {
1043 			return -ENOMEM;
1044 		}
1045 	}
1046 
1047 	assert(seq->ch == accel_ch);
1048 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1049 	if (spdk_unlikely(task == NULL)) {
1050 		if (*pseq == NULL) {
1051 			accel_sequence_put(seq);
1052 		}
1053 
1054 		return -ENOMEM;
1055 	}
1056 
1057 	memset(&task->fill_pattern, pattern, sizeof(uint64_t));
1058 
1059 	task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
1060 	if (spdk_unlikely(!task->aux)) {
1061 		SPDK_ERRLOG("Fatal problem, aux data was not allocated\n");
1062 		if (*pseq == NULL) {
1063 			accel_sequence_put((seq));
1064 		}
1065 		STAILQ_INSERT_HEAD(&task->accel_ch->task_pool, task, link);
1066 		task->seq = NULL;
1067 		assert(0);
1068 		return -ENOMEM;
1069 	}
1070 	SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
1071 	task->has_aux = true;
1072 
1073 	task->d.iovs = &task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
1074 	task->d.iovs[0].iov_base = buf;
1075 	task->d.iovs[0].iov_len = len;
1076 	task->d.iovcnt = 1;
1077 	task->nbytes = len;
1078 	task->src_domain = NULL;
1079 	task->dst_domain = domain;
1080 	task->dst_domain_ctx = domain_ctx;
1081 	task->op_code = SPDK_ACCEL_OPC_FILL;
1082 
1083 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1084 	*pseq = seq;
1085 
1086 	return 0;
1087 }
1088 
1089 int
1090 spdk_accel_append_decompress(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1091 			     struct iovec *dst_iovs, size_t dst_iovcnt,
1092 			     struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1093 			     struct iovec *src_iovs, size_t src_iovcnt,
1094 			     struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1095 			     spdk_accel_step_cb cb_fn, void *cb_arg)
1096 {
1097 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1098 	struct spdk_accel_task *task;
1099 	struct spdk_accel_sequence *seq = *pseq;
1100 
1101 	if (seq == NULL) {
1102 		seq = accel_sequence_get(accel_ch);
1103 		if (spdk_unlikely(seq == NULL)) {
1104 			return -ENOMEM;
1105 		}
1106 	}
1107 
1108 	assert(seq->ch == accel_ch);
1109 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1110 	if (spdk_unlikely(task == NULL)) {
1111 		if (*pseq == NULL) {
1112 			accel_sequence_put(seq);
1113 		}
1114 
1115 		return -ENOMEM;
1116 	}
1117 
1118 	/* TODO: support output_size for chaining */
1119 	task->output_size = NULL;
1120 	task->dst_domain = dst_domain;
1121 	task->dst_domain_ctx = dst_domain_ctx;
1122 	task->d.iovs = dst_iovs;
1123 	task->d.iovcnt = dst_iovcnt;
1124 	task->src_domain = src_domain;
1125 	task->src_domain_ctx = src_domain_ctx;
1126 	task->s.iovs = src_iovs;
1127 	task->s.iovcnt = src_iovcnt;
1128 	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
1129 	task->op_code = SPDK_ACCEL_OPC_DECOMPRESS;
1130 
1131 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1132 	*pseq = seq;
1133 
1134 	return 0;
1135 }
1136 
1137 int
1138 spdk_accel_append_encrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1139 			  struct spdk_accel_crypto_key *key,
1140 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
1141 			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1142 			  struct iovec *src_iovs, uint32_t src_iovcnt,
1143 			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1144 			  uint64_t iv, uint32_t block_size,
1145 			  spdk_accel_step_cb cb_fn, void *cb_arg)
1146 {
1147 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1148 	struct spdk_accel_task *task;
1149 	struct spdk_accel_sequence *seq = *pseq;
1150 
1151 	assert(dst_iovs && dst_iovcnt && src_iovs && src_iovcnt && key && block_size);
1152 
1153 	if (seq == NULL) {
1154 		seq = accel_sequence_get(accel_ch);
1155 		if (spdk_unlikely(seq == NULL)) {
1156 			return -ENOMEM;
1157 		}
1158 	}
1159 
1160 	assert(seq->ch == accel_ch);
1161 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1162 	if (spdk_unlikely(task == NULL)) {
1163 		if (*pseq == NULL) {
1164 			accel_sequence_put(seq);
1165 		}
1166 
1167 		return -ENOMEM;
1168 	}
1169 
1170 	task->crypto_key = key;
1171 	task->src_domain = src_domain;
1172 	task->src_domain_ctx = src_domain_ctx;
1173 	task->s.iovs = src_iovs;
1174 	task->s.iovcnt = src_iovcnt;
1175 	task->dst_domain = dst_domain;
1176 	task->dst_domain_ctx = dst_domain_ctx;
1177 	task->d.iovs = dst_iovs;
1178 	task->d.iovcnt = dst_iovcnt;
1179 	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
1180 	task->iv = iv;
1181 	task->block_size = block_size;
1182 	task->op_code = SPDK_ACCEL_OPC_ENCRYPT;
1183 
1184 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1185 	*pseq = seq;
1186 
1187 	return 0;
1188 }
1189 
1190 int
1191 spdk_accel_append_decrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1192 			  struct spdk_accel_crypto_key *key,
1193 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
1194 			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1195 			  struct iovec *src_iovs, uint32_t src_iovcnt,
1196 			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1197 			  uint64_t iv, uint32_t block_size,
1198 			  spdk_accel_step_cb cb_fn, void *cb_arg)
1199 {
1200 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1201 	struct spdk_accel_task *task;
1202 	struct spdk_accel_sequence *seq = *pseq;
1203 
1204 	assert(dst_iovs && dst_iovcnt && src_iovs && src_iovcnt && key && block_size);
1205 
1206 	if (seq == NULL) {
1207 		seq = accel_sequence_get(accel_ch);
1208 		if (spdk_unlikely(seq == NULL)) {
1209 			return -ENOMEM;
1210 		}
1211 	}
1212 
1213 	assert(seq->ch == accel_ch);
1214 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1215 	if (spdk_unlikely(task == NULL)) {
1216 		if (*pseq == NULL) {
1217 			accel_sequence_put(seq);
1218 		}
1219 
1220 		return -ENOMEM;
1221 	}
1222 
1223 	task->crypto_key = key;
1224 	task->src_domain = src_domain;
1225 	task->src_domain_ctx = src_domain_ctx;
1226 	task->s.iovs = src_iovs;
1227 	task->s.iovcnt = src_iovcnt;
1228 	task->dst_domain = dst_domain;
1229 	task->dst_domain_ctx = dst_domain_ctx;
1230 	task->d.iovs = dst_iovs;
1231 	task->d.iovcnt = dst_iovcnt;
1232 	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
1233 	task->iv = iv;
1234 	task->block_size = block_size;
1235 	task->op_code = SPDK_ACCEL_OPC_DECRYPT;
1236 
1237 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1238 	*pseq = seq;
1239 
1240 	return 0;
1241 }
1242 
1243 int
1244 spdk_accel_append_crc32c(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1245 			 uint32_t *dst, struct iovec *iovs, uint32_t iovcnt,
1246 			 struct spdk_memory_domain *domain, void *domain_ctx,
1247 			 uint32_t seed, spdk_accel_step_cb cb_fn, void *cb_arg)
1248 {
1249 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1250 	struct spdk_accel_task *task;
1251 	struct spdk_accel_sequence *seq = *pseq;
1252 
1253 	if (seq == NULL) {
1254 		seq = accel_sequence_get(accel_ch);
1255 		if (spdk_unlikely(seq == NULL)) {
1256 			return -ENOMEM;
1257 		}
1258 	}
1259 
1260 	assert(seq->ch == accel_ch);
1261 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1262 	if (spdk_unlikely(task == NULL)) {
1263 		if (*pseq == NULL) {
1264 			accel_sequence_put(seq);
1265 		}
1266 
1267 		return -ENOMEM;
1268 	}
1269 
1270 	task->s.iovs = iovs;
1271 	task->s.iovcnt = iovcnt;
1272 	task->src_domain = domain;
1273 	task->src_domain_ctx = domain_ctx;
1274 	task->nbytes = accel_get_iovlen(iovs, iovcnt);
1275 	task->crc_dst = dst;
1276 	task->seed = seed;
1277 	task->op_code = SPDK_ACCEL_OPC_CRC32C;
1278 	task->dst_domain = NULL;
1279 
1280 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1281 	*pseq = seq;
1282 
1283 	return 0;
1284 }
1285 
1286 int
1287 spdk_accel_get_buf(struct spdk_io_channel *ch, uint64_t len, void **buf,
1288 		   struct spdk_memory_domain **domain, void **domain_ctx)
1289 {
1290 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1291 	struct accel_buffer *accel_buf;
1292 
1293 	accel_buf = accel_get_buf(accel_ch, len);
1294 	if (spdk_unlikely(accel_buf == NULL)) {
1295 		return -ENOMEM;
1296 	}
1297 
1298 	accel_buf->ch = accel_ch;
1299 
1300 	/* We always return the same pointer and identify the buffers through domain_ctx */
1301 	*buf = ACCEL_BUFFER_BASE;
1302 	*domain_ctx = accel_buf;
1303 	*domain = g_accel_domain;
1304 
1305 	return 0;
1306 }
1307 
1308 void
1309 spdk_accel_put_buf(struct spdk_io_channel *ch, void *buf,
1310 		   struct spdk_memory_domain *domain, void *domain_ctx)
1311 {
1312 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1313 	struct accel_buffer *accel_buf = domain_ctx;
1314 
1315 	assert(domain == g_accel_domain);
1316 	assert(buf == ACCEL_BUFFER_BASE);
1317 
1318 	accel_put_buf(accel_ch, accel_buf);
1319 }
1320 
1321 static void
1322 accel_sequence_complete_task(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1323 {
1324 	struct accel_io_channel *ch = seq->ch;
1325 	spdk_accel_step_cb cb_fn;
1326 	void *cb_arg;
1327 
1328 	TAILQ_REMOVE(&seq->tasks, task, seq_link);
1329 	cb_fn = task->step_cb_fn;
1330 	cb_arg = task->cb_arg;
1331 	task->seq = NULL;
1332 	if (task->has_aux) {
1333 		SLIST_INSERT_HEAD(&ch->task_aux_data_pool, task->aux, link);
1334 		task->aux = NULL;
1335 		task->has_aux = false;
1336 	}
1337 	STAILQ_INSERT_HEAD(&ch->task_pool, task, link);
1338 	if (cb_fn != NULL) {
1339 		cb_fn(cb_arg);
1340 	}
1341 }
1342 
1343 static void
1344 accel_sequence_complete_tasks(struct spdk_accel_sequence *seq)
1345 {
1346 	struct spdk_accel_task *task;
1347 
1348 	while (!TAILQ_EMPTY(&seq->tasks)) {
1349 		task = TAILQ_FIRST(&seq->tasks);
1350 		accel_sequence_complete_task(seq, task);
1351 	}
1352 }
1353 
1354 static void
1355 accel_sequence_complete(struct spdk_accel_sequence *seq)
1356 {
1357 	SPDK_DEBUGLOG(accel, "Completed sequence: %p with status: %d\n", seq, seq->status);
1358 
1359 	accel_update_stats(seq->ch, sequence_executed, 1);
1360 	if (spdk_unlikely(seq->status != 0)) {
1361 		accel_update_stats(seq->ch, sequence_failed, 1);
1362 	}
1363 
1364 	/* First notify all users that appended operations to this sequence */
1365 	accel_sequence_complete_tasks(seq);
1366 
1367 	/* Then notify the user that finished the sequence */
1368 	seq->cb_fn(seq->cb_arg, seq->status);
1369 
1370 	accel_sequence_put(seq);
1371 }
1372 
1373 static void
1374 accel_update_virt_iov(struct iovec *diov, struct iovec *siov, struct accel_buffer *accel_buf)
1375 {
1376 	uintptr_t offset;
1377 
1378 	offset = (uintptr_t)siov->iov_base & ACCEL_BUFFER_OFFSET_MASK;
1379 	assert(offset < accel_buf->len);
1380 
1381 	diov->iov_base = (char *)accel_buf->buf + offset;
1382 	diov->iov_len = siov->iov_len;
1383 }
1384 
1385 static void
1386 accel_sequence_set_virtbuf(struct spdk_accel_sequence *seq, struct accel_buffer *buf)
1387 {
1388 	struct spdk_accel_task *task;
1389 	struct iovec *iov;
1390 
1391 	/* Now that we've allocated the actual data buffer for this accel_buffer, update all tasks
1392 	 * in a sequence that were using it.
1393 	 */
1394 	TAILQ_FOREACH(task, &seq->tasks, seq_link) {
1395 		if (task->src_domain == g_accel_domain && task->src_domain_ctx == buf) {
1396 			if (!task->has_aux) {
1397 				task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
1398 				assert(task->aux && "Can't allocate aux data structure");
1399 				task->has_aux = true;
1400 				SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
1401 			}
1402 
1403 			iov = &task->aux->iovs[SPDK_ACCEL_AXU_IOV_VIRT_SRC];
1404 			assert(task->s.iovcnt == 1);
1405 			accel_update_virt_iov(iov, &task->s.iovs[0], buf);
1406 			task->src_domain = NULL;
1407 			task->s.iovs = iov;
1408 		}
1409 		if (task->dst_domain == g_accel_domain && task->dst_domain_ctx == buf) {
1410 			if (!task->has_aux) {
1411 				task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
1412 				assert(task->aux && "Can't allocate aux data structure");
1413 				task->has_aux = true;
1414 				SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
1415 			}
1416 
1417 			iov = &task->aux->iovs[SPDK_ACCEL_AXU_IOV_VIRT_DST];
1418 			assert(task->d.iovcnt == 1);
1419 			accel_update_virt_iov(iov, &task->d.iovs[0], buf);
1420 			task->dst_domain = NULL;
1421 			task->d.iovs = iov;
1422 		}
1423 	}
1424 }
1425 
1426 static void accel_process_sequence(struct spdk_accel_sequence *seq);
1427 
1428 static void
1429 accel_iobuf_get_virtbuf_cb(struct spdk_iobuf_entry *entry, void *buf)
1430 {
1431 	struct accel_buffer *accel_buf;
1432 
1433 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1434 
1435 	assert(accel_buf->seq != NULL);
1436 	assert(accel_buf->buf == NULL);
1437 	accel_buf->buf = buf;
1438 
1439 	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
1440 	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
1441 	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
1442 	accel_process_sequence(accel_buf->seq);
1443 }
1444 
1445 static bool
1446 accel_sequence_alloc_buf(struct spdk_accel_sequence *seq, struct accel_buffer *buf,
1447 			 spdk_iobuf_get_cb cb_fn)
1448 {
1449 	struct accel_io_channel *ch = seq->ch;
1450 
1451 	assert(buf->seq == NULL);
1452 
1453 	buf->seq = seq;
1454 
1455 	/* Buffer might be already allocated by memory domain translation. */
1456 	if (buf->buf) {
1457 		return true;
1458 	}
1459 
1460 	buf->buf = spdk_iobuf_get(&ch->iobuf, buf->len, &buf->iobuf, cb_fn);
1461 	if (spdk_unlikely(buf->buf == NULL)) {
1462 		accel_update_stats(ch, retry.iobuf, 1);
1463 		return false;
1464 	}
1465 
1466 	return true;
1467 }
1468 
1469 static bool
1470 accel_sequence_check_virtbuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1471 {
1472 	/* If a task doesn't have dst/src (e.g. fill, crc32), its dst/src domain should be set to
1473 	 * NULL */
1474 	if (task->src_domain == g_accel_domain) {
1475 		if (!accel_sequence_alloc_buf(seq, task->src_domain_ctx,
1476 					      accel_iobuf_get_virtbuf_cb)) {
1477 			return false;
1478 		}
1479 
1480 		accel_sequence_set_virtbuf(seq, task->src_domain_ctx);
1481 	}
1482 
1483 	if (task->dst_domain == g_accel_domain) {
1484 		if (!accel_sequence_alloc_buf(seq, task->dst_domain_ctx,
1485 					      accel_iobuf_get_virtbuf_cb)) {
1486 			return false;
1487 		}
1488 
1489 		accel_sequence_set_virtbuf(seq, task->dst_domain_ctx);
1490 	}
1491 
1492 	return true;
1493 }
1494 
1495 static void
1496 accel_sequence_get_buf_cb(struct spdk_iobuf_entry *entry, void *buf)
1497 {
1498 	struct accel_buffer *accel_buf;
1499 
1500 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1501 
1502 	assert(accel_buf->seq != NULL);
1503 	assert(accel_buf->buf == NULL);
1504 	accel_buf->buf = buf;
1505 
1506 	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
1507 	accel_buf->cb_fn(accel_buf->seq, accel_buf->cb_ctx);
1508 }
1509 
1510 bool
1511 spdk_accel_alloc_sequence_buf(struct spdk_accel_sequence *seq, void *buf,
1512 			      struct spdk_memory_domain *domain, void *domain_ctx,
1513 			      spdk_accel_sequence_get_buf_cb cb_fn, void *cb_ctx)
1514 {
1515 	struct accel_buffer *accel_buf = domain_ctx;
1516 
1517 	assert(domain == g_accel_domain);
1518 	accel_buf->cb_fn = cb_fn;
1519 	accel_buf->cb_ctx = cb_ctx;
1520 
1521 	if (!accel_sequence_alloc_buf(seq, accel_buf, accel_sequence_get_buf_cb)) {
1522 		return false;
1523 	}
1524 
1525 	accel_sequence_set_virtbuf(seq, accel_buf);
1526 
1527 	return true;
1528 }
1529 
/* Return the first outstanding task in a sequence, or NULL if there are
 * none left.  Intended for accel drivers iterating over a sequence. */
struct spdk_accel_task *
spdk_accel_sequence_first_task(struct spdk_accel_sequence *seq)
{
	return TAILQ_FIRST(&seq->tasks);
}
1535 
/* Return the task following 'task' within its sequence, or NULL if 'task'
 * is the last one. */
struct spdk_accel_task *
spdk_accel_sequence_next_task(struct spdk_accel_task *task)
{
	return TAILQ_NEXT(task, seq_link);
}
1541 
1542 static inline void
1543 accel_set_bounce_buffer(struct spdk_accel_bounce_buffer *bounce, struct iovec **iovs,
1544 			uint32_t *iovcnt, struct spdk_memory_domain **domain, void **domain_ctx,
1545 			struct accel_buffer *buf)
1546 {
1547 	bounce->orig_iovs = *iovs;
1548 	bounce->orig_iovcnt = *iovcnt;
1549 	bounce->orig_domain = *domain;
1550 	bounce->orig_domain_ctx = *domain_ctx;
1551 	bounce->iov.iov_base = buf->buf;
1552 	bounce->iov.iov_len = buf->len;
1553 
1554 	*iovs = &bounce->iov;
1555 	*iovcnt = 1;
1556 	*domain = NULL;
1557 }
1558 
1559 static void
1560 accel_iobuf_get_src_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
1561 {
1562 	struct spdk_accel_task *task;
1563 	struct accel_buffer *accel_buf;
1564 
1565 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1566 	assert(accel_buf->buf == NULL);
1567 	accel_buf->buf = buf;
1568 
1569 	task = TAILQ_FIRST(&accel_buf->seq->tasks);
1570 	assert(task != NULL);
1571 
1572 	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
1573 	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
1574 	assert(task->aux);
1575 	assert(task->has_aux);
1576 	accel_set_bounce_buffer(&task->aux->bounce.s, &task->s.iovs, &task->s.iovcnt, &task->src_domain,
1577 				&task->src_domain_ctx, accel_buf);
1578 	accel_process_sequence(accel_buf->seq);
1579 }
1580 
1581 static void
1582 accel_iobuf_get_dst_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
1583 {
1584 	struct spdk_accel_task *task;
1585 	struct accel_buffer *accel_buf;
1586 
1587 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1588 	assert(accel_buf->buf == NULL);
1589 	accel_buf->buf = buf;
1590 
1591 	task = TAILQ_FIRST(&accel_buf->seq->tasks);
1592 	assert(task != NULL);
1593 
1594 	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
1595 	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
1596 	assert(task->aux);
1597 	assert(task->has_aux);
1598 	accel_set_bounce_buffer(&task->aux->bounce.d, &task->d.iovs, &task->d.iovcnt, &task->dst_domain,
1599 				&task->dst_domain_ctx, accel_buf);
1600 	accel_process_sequence(accel_buf->seq);
1601 }
1602 
/* Allocate bounce buffers for a task whose data lives in a foreign memory
 * domain, for use when the module executing the opcode cannot access that
 * domain directly.  Returns 0 on success, -EAGAIN when a buffer request was
 * queued (the sequence resumes from the iobuf callback), or -ENOMEM when no
 * buffer descriptor is available.  Bounce buffers are tracked on
 * seq->bounce_bufs and released when the sequence completes. */
static int
accel_sequence_check_bouncebuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	struct accel_buffer *buf;

	if (task->src_domain != NULL) {
		/* By the time we're here, accel buffers should have been allocated */
		assert(task->src_domain != g_accel_domain);

		if (!task->has_aux) {
			task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
			if (spdk_unlikely(!task->aux)) {
				/* The aux pool is sized to make this unreachable in practice. */
				SPDK_ERRLOG("Can't allocate aux data structure\n");
				assert(0);
				return -EAGAIN;
			}
			task->has_aux = true;
			SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
		}
		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->s.iovs, task->s.iovcnt));
		if (buf == NULL) {
			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
			return -ENOMEM;
		}

		/* Track the buffer so accel_sequence_put() releases it later. */
		SLIST_INSERT_HEAD(&seq->bounce_bufs, buf, link);
		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_src_bounce_cb)) {
			return -EAGAIN;
		}

		accel_set_bounce_buffer(&task->aux->bounce.s, &task->s.iovs, &task->s.iovcnt,
					&task->src_domain, &task->src_domain_ctx, buf);
	}

	if (task->dst_domain != NULL) {
		/* By the time we're here, accel buffers should have been allocated */
		assert(task->dst_domain != g_accel_domain);

		if (!task->has_aux) {
			task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
			if (spdk_unlikely(!task->aux)) {
				SPDK_ERRLOG("Can't allocate aux data structure\n");
				assert(0);
				return -EAGAIN;
			}
			task->has_aux = true;
			SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
		}
		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->d.iovs, task->d.iovcnt));
		if (buf == NULL) {
			/* The src buffer will be released when a sequence is completed */
			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
			return -ENOMEM;
		}

		SLIST_INSERT_HEAD(&seq->bounce_bufs, buf, link);
		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_dst_bounce_cb)) {
			return -EAGAIN;
		}

		accel_set_bounce_buffer(&task->aux->bounce.d, &task->d.iovs, &task->d.iovcnt,
					&task->dst_domain, &task->dst_domain_ctx, buf);
	}

	return 0;
}
1669 
1670 static void
1671 accel_task_pull_data_cb(void *ctx, int status)
1672 {
1673 	struct spdk_accel_sequence *seq = ctx;
1674 
1675 	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
1676 	if (spdk_likely(status == 0)) {
1677 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
1678 	} else {
1679 		accel_sequence_set_fail(seq, status);
1680 	}
1681 
1682 	accel_process_sequence(seq);
1683 }
1684 
1685 static void
1686 accel_task_pull_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1687 {
1688 	int rc;
1689 
1690 	assert(task->has_aux);
1691 	assert(task->aux);
1692 	assert(task->aux->bounce.s.orig_iovs != NULL);
1693 	assert(task->aux->bounce.s.orig_domain != NULL);
1694 	assert(task->aux->bounce.s.orig_domain != g_accel_domain);
1695 	assert(!g_modules_opc[task->op_code].supports_memory_domains);
1696 
1697 	rc = spdk_memory_domain_pull_data(task->aux->bounce.s.orig_domain,
1698 					  task->aux->bounce.s.orig_domain_ctx,
1699 					  task->aux->bounce.s.orig_iovs, task->aux->bounce.s.orig_iovcnt,
1700 					  task->s.iovs, task->s.iovcnt,
1701 					  accel_task_pull_data_cb, seq);
1702 	if (spdk_unlikely(rc != 0)) {
1703 		SPDK_ERRLOG("Failed to pull data from memory domain: %s, rc: %d\n",
1704 			    spdk_memory_domain_get_dma_device_id(task->aux->bounce.s.orig_domain), rc);
1705 		accel_sequence_set_fail(seq, rc);
1706 	}
1707 }
1708 
1709 static void
1710 accel_task_push_data_cb(void *ctx, int status)
1711 {
1712 	struct spdk_accel_sequence *seq = ctx;
1713 
1714 	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
1715 	if (spdk_likely(status == 0)) {
1716 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
1717 	} else {
1718 		accel_sequence_set_fail(seq, status);
1719 	}
1720 
1721 	accel_process_sequence(seq);
1722 }
1723 
1724 static void
1725 accel_task_push_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1726 {
1727 	int rc;
1728 
1729 	assert(task->has_aux);
1730 	assert(task->aux);
1731 	assert(task->aux->bounce.d.orig_iovs != NULL);
1732 	assert(task->aux->bounce.d.orig_domain != NULL);
1733 	assert(task->aux->bounce.d.orig_domain != g_accel_domain);
1734 	assert(!g_modules_opc[task->op_code].supports_memory_domains);
1735 
1736 	rc = spdk_memory_domain_push_data(task->aux->bounce.d.orig_domain,
1737 					  task->aux->bounce.d.orig_domain_ctx,
1738 					  task->aux->bounce.d.orig_iovs, task->aux->bounce.d.orig_iovcnt,
1739 					  task->d.iovs, task->d.iovcnt,
1740 					  accel_task_push_data_cb, seq);
1741 	if (spdk_unlikely(rc != 0)) {
1742 		SPDK_ERRLOG("Failed to push data to memory domain: %s, rc: %d\n",
1743 			    spdk_memory_domain_get_dma_device_id(task->aux->bounce.s.orig_domain), rc);
1744 		accel_sequence_set_fail(seq, rc);
1745 	}
1746 }
1747 
/* Main state machine driving execution of a sequence.  Runs states
 * back-to-back until the state stops changing, i.e. until the sequence is
 * waiting on an asynchronous event (buffer allocation, data movement, task
 * execution) or has completed.  Asynchronous callbacks advance seq->state
 * and re-enter this function; the in_process_sequence flag prevents
 * recursion when a module completes a task synchronously from within
 * accel_submit_task(). */
static void
accel_process_sequence(struct spdk_accel_sequence *seq)
{
	struct accel_io_channel *accel_ch = seq->ch;
	struct spdk_accel_task *task;
	enum accel_sequence_state state;
	int rc;

	/* Prevent recursive calls to this function */
	if (spdk_unlikely(seq->in_process_sequence)) {
		return;
	}
	seq->in_process_sequence = true;

	task = TAILQ_FIRST(&seq->tasks);
	do {
		state = seq->state;
		switch (state) {
		case ACCEL_SEQUENCE_STATE_INIT:
			if (g_accel_driver != NULL) {
				/* A platform driver is registered; let it execute the tasks */
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS);
				break;
			}
		/* Fall through */
		case ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
			if (!accel_sequence_check_virtbuf(seq, task)) {
				/* We couldn't allocate a buffer, wait until one is available */
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
		/* Fall through */
		case ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF:
			/* If a module supports memory domains, we don't need to allocate bounce
			 * buffers */
			if (g_modules_opc[task->op_code].supports_memory_domains) {
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
			rc = accel_sequence_check_bouncebuf(seq, task);
			if (spdk_unlikely(rc != 0)) {
				/* We couldn't allocate a buffer, wait until one is available */
				if (rc == -EAGAIN) {
					break;
				}
				accel_sequence_set_fail(seq, rc);
				break;
			}
			/* If the source was redirected to a bounce buffer, the caller's data
			 * must be pulled into it before the task can execute */
			if (task->has_aux && task->s.iovs == &task->aux->bounce.s.iov) {
				assert(task->aux->bounce.s.orig_iovs);
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PULL_DATA);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
		/* Fall through */
		case ACCEL_SEQUENCE_STATE_EXEC_TASK:
			SPDK_DEBUGLOG(accel, "Executing %s operation, sequence: %p\n",
				      g_opcode_strings[task->op_code], seq);

			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_TASK);
			rc = accel_submit_task(accel_ch, task);
			if (spdk_unlikely(rc != 0)) {
				SPDK_ERRLOG("Failed to submit %s operation, sequence: %p\n",
					    g_opcode_strings[task->op_code], seq);
				accel_sequence_set_fail(seq, rc);
			}
			break;
		case ACCEL_SEQUENCE_STATE_PULL_DATA:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
			accel_task_pull_data(seq, task);
			break;
		case ACCEL_SEQUENCE_STATE_COMPLETE_TASK:
			/* If the destination was a bounce buffer, push its contents back to
			 * the caller's memory domain before completing the task */
			if (task->has_aux && task->d.iovs == &task->aux->bounce.d.iov) {
				assert(task->aux->bounce.d.orig_iovs);
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PUSH_DATA);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
			break;
		case ACCEL_SEQUENCE_STATE_PUSH_DATA:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
			accel_task_push_data(seq, task);
			break;
		case ACCEL_SEQUENCE_STATE_NEXT_TASK:
			accel_sequence_complete_task(seq, task);
			/* Check if there are any remaining tasks */
			task = TAILQ_FIRST(&seq->tasks);
			if (task == NULL) {
				/* Immediately return here to make sure we don't touch the sequence
				 * after it's completed */
				accel_sequence_complete(seq);
				return;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_INIT);
			break;
		case ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS:
			assert(!TAILQ_EMPTY(&seq->tasks));

			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS);
			rc = g_accel_driver->execute_sequence(accel_ch->driver_channel, seq);
			if (spdk_unlikely(rc != 0)) {
				SPDK_ERRLOG("Failed to execute sequence: %p using driver: %s\n",
					    seq, g_accel_driver->name);
				accel_sequence_set_fail(seq, rc);
			}
			break;
		case ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS:
			/* Get the task again, as the driver might have completed some tasks
			 * synchronously */
			task = TAILQ_FIRST(&seq->tasks);
			if (task == NULL) {
				/* Immediately return here to make sure we don't touch the sequence
				 * after it's completed */
				accel_sequence_complete(seq);
				return;
			}
			/* We don't want to execute the next task through the driver, so we
			 * explicitly omit the INIT state here */
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
			break;
		case ACCEL_SEQUENCE_STATE_ERROR:
			/* Immediately return here to make sure we don't touch the sequence
			 * after it's completed */
			assert(seq->status != 0);
			accel_sequence_complete(seq);
			return;
		case ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF:
		case ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF:
		case ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA:
		case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
		case ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA:
		case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS:
			/* Waiting on an asynchronous event; nothing to do until a callback
			 * advances the state and re-enters this function */
			break;
		default:
			assert(0 && "bad state");
			break;
		}
	} while (seq->state != state);

	seq->in_process_sequence = false;
}
1890 
/* Completion callback for a single task within a sequence.  Depending on whether
 * the task was executed by a module or by the platform driver, record the result
 * and advance the sequence's state machine. */
static void
accel_sequence_task_cb(void *cb_arg, int status)
{
	struct spdk_accel_sequence *seq = cb_arg;
	/* The task that just completed is always at the head of the sequence */
	struct spdk_accel_task *task = TAILQ_FIRST(&seq->tasks);

	switch (seq->state) {
	case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
		/* Task was submitted to a module; resume processing the sequence */
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_COMPLETE_TASK);
		if (spdk_unlikely(status != 0)) {
			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p\n",
				    g_opcode_strings[task->op_code], seq);
			accel_sequence_set_fail(seq, status);
		}

		accel_process_sequence(seq);
		break;
	case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS:
		assert(g_accel_driver != NULL);
		/* Immediately remove the task from the outstanding list to make sure the next call
		 * to spdk_accel_sequence_first_task() doesn't return it */
		accel_sequence_complete_task(seq, task);
		if (spdk_unlikely(status != 0)) {
			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p through "
				    "driver: %s\n", g_opcode_strings[task->op_code], seq,
				    g_accel_driver->name);
			/* Update status without using accel_sequence_set_fail() to avoid changing
			 * seq's state to ERROR until driver calls spdk_accel_sequence_continue() */
			seq->status = status;
		}
		break;
	default:
		assert(0 && "bad state");
		break;
	}
}
1927 
1928 void
1929 spdk_accel_sequence_continue(struct spdk_accel_sequence *seq)
1930 {
1931 	assert(g_accel_driver != NULL);
1932 	assert(seq->state == ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS);
1933 
1934 	if (spdk_likely(seq->status == 0)) {
1935 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS);
1936 	} else {
1937 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
1938 	}
1939 
1940 	accel_process_sequence(seq);
1941 }
1942 
/* Check whether two iovec arrays describe exactly the same buffers: identical
 * element count and identical base/length in every slot. */
static bool
accel_compare_iovs(struct iovec *iova, uint32_t iovacnt, struct iovec *iovb, uint32_t iovbcnt)
{
	uint32_t i;

	if (iovacnt != iovbcnt) {
		return false;
	}

	for (i = 0; i < iovacnt; i++) {
		if (iova[i].iov_base != iovb[i].iov_base ||
		    iova[i].iov_len != iovb[i].iov_len) {
			return false;
		}
	}

	return true;
}
1953 
/* Try to retarget @task's output so that it writes directly into @next's
 * destination; @next is a COPY whose source matches @task's output, so a
 * successful retarget makes the copy redundant.  Returns true on success. */
static bool
accel_task_set_dstbuf(struct spdk_accel_task *task, struct spdk_accel_task *next)
{
	struct spdk_accel_task *prev;

	switch (task->op_code) {
	case SPDK_ACCEL_OPC_DECOMPRESS:
	case SPDK_ACCEL_OPC_FILL:
	case SPDK_ACCEL_OPC_ENCRYPT:
	case SPDK_ACCEL_OPC_DECRYPT:
		/* Only merge when the copy reads exactly the buffer this task writes */
		if (task->dst_domain != next->src_domain) {
			return false;
		}
		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
					next->s.iovs, next->s.iovcnt)) {
			return false;
		}
		/* Redirect this task's output straight to the copy's destination */
		task->d.iovs = next->d.iovs;
		task->d.iovcnt = next->d.iovcnt;
		task->dst_domain = next->dst_domain;
		task->dst_domain_ctx = next->dst_domain_ctx;
		break;
	case SPDK_ACCEL_OPC_CRC32C:
		/* crc32 is special, because it doesn't have a dst buffer */
		if (task->src_domain != next->src_domain) {
			return false;
		}
		if (!accel_compare_iovs(task->s.iovs, task->s.iovcnt,
					next->s.iovs, next->s.iovcnt)) {
			return false;
		}
		/* We can only change crc32's buffer if we can change previous task's buffer */
		prev = TAILQ_PREV(task, accel_sequence_tasks, seq_link);
		if (prev == NULL) {
			return false;
		}
		/* Recursively retarget the task producing crc32's input buffer */
		if (!accel_task_set_dstbuf(prev, next)) {
			return false;
		}
		/* The producer now writes to the copy's destination, so read from there */
		task->s.iovs = next->d.iovs;
		task->s.iovcnt = next->d.iovcnt;
		task->src_domain = next->dst_domain;
		task->src_domain_ctx = next->dst_domain_ctx;
		break;
	default:
		return false;
	}

	return true;
}
2004 
/* Try to merge @task with the task that follows it, @next, in order to elide a
 * COPY operation.  If @next is removed from the sequence, *next_task is advanced
 * so the caller's TAILQ_FOREACH_SAFE() iteration stays valid. */
static void
accel_sequence_merge_tasks(struct spdk_accel_sequence *seq, struct spdk_accel_task *task,
			   struct spdk_accel_task **next_task)
{
	struct spdk_accel_task *next = *next_task;

	switch (task->op_code) {
	case SPDK_ACCEL_OPC_COPY:
		/* We only allow changing src of operations that actually have a src, e.g. we never
		 * do it for fill.  Theoretically, it is possible, but we'd have to be careful to
		 * change the src of the operation after fill (which in turn could also be a fill).
		 * So, for the sake of simplicity, skip this type of operations for now.
		 */
		if (next->op_code != SPDK_ACCEL_OPC_DECOMPRESS &&
		    next->op_code != SPDK_ACCEL_OPC_COPY &&
		    next->op_code != SPDK_ACCEL_OPC_ENCRYPT &&
		    next->op_code != SPDK_ACCEL_OPC_DECRYPT &&
		    next->op_code != SPDK_ACCEL_OPC_CRC32C) {
			break;
		}
		if (task->dst_domain != next->src_domain) {
			break;
		}
		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
					next->s.iovs, next->s.iovcnt)) {
			break;
		}
		/* The copy's output feeds next's input, so next can read directly from the
		 * copy's source, making the copy itself unnecessary */
		next->s.iovs = task->s.iovs;
		next->s.iovcnt = task->s.iovcnt;
		next->src_domain = task->src_domain;
		next->src_domain_ctx = task->src_domain_ctx;
		/* The copy (task) is done; remove it from the sequence */
		accel_sequence_complete_task(seq, task);
		break;
	case SPDK_ACCEL_OPC_DECOMPRESS:
	case SPDK_ACCEL_OPC_FILL:
	case SPDK_ACCEL_OPC_ENCRYPT:
	case SPDK_ACCEL_OPC_DECRYPT:
	case SPDK_ACCEL_OPC_CRC32C:
		/* We can only merge tasks when one of them is a copy */
		if (next->op_code != SPDK_ACCEL_OPC_COPY) {
			break;
		}
		if (!accel_task_set_dstbuf(task, next)) {
			break;
		}
		/* We're removing next_task from the tasks queue, so we need to update its pointer,
		 * so that the TAILQ_FOREACH_SAFE() loop below works correctly */
		*next_task = TAILQ_NEXT(next, seq_link);
		accel_sequence_complete_task(seq, next);
		break;
	default:
		assert(0 && "bad opcode");
		break;
	}
}
2060 
/* Start executing a sequence.  Before processing begins, adjacent task pairs are
 * examined and merged where possible to elide unnecessary copy operations.
 * @cb_fn is invoked with @cb_arg once the whole sequence completes or fails. */
void
spdk_accel_sequence_finish(struct spdk_accel_sequence *seq,
			   spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct spdk_accel_task *task, *next;

	/* Try to remove any copy operations if possible */
	TAILQ_FOREACH_SAFE(task, &seq->tasks, seq_link, next) {
		/* The last task has no successor to merge with */
		if (next == NULL) {
			break;
		}
		/* May complete task or next and advance the next pointer */
		accel_sequence_merge_tasks(seq, task, &next);
	}

	seq->cb_fn = cb_fn;
	seq->cb_arg = cb_arg;

	accel_process_sequence(seq);
}
2080 
2081 void
2082 spdk_accel_sequence_reverse(struct spdk_accel_sequence *seq)
2083 {
2084 	struct accel_sequence_tasks tasks = TAILQ_HEAD_INITIALIZER(tasks);
2085 	struct spdk_accel_task *task;
2086 
2087 	TAILQ_SWAP(&tasks, &seq->tasks, spdk_accel_task, seq_link);
2088 
2089 	while (!TAILQ_EMPTY(&tasks)) {
2090 		task = TAILQ_FIRST(&tasks);
2091 		TAILQ_REMOVE(&tasks, task, seq_link);
2092 		TAILQ_INSERT_HEAD(&seq->tasks, task, seq_link);
2093 	}
2094 }
2095 
/* Abort a sequence: complete all of its remaining tasks (with the sequence's
 * current status) and release the sequence object.  A NULL sequence is a no-op,
 * which simplifies error paths in callers. */
void
spdk_accel_sequence_abort(struct spdk_accel_sequence *seq)
{
	if (seq != NULL) {
		accel_sequence_complete_tasks(seq);
		accel_sequence_put(seq);
	}
}
2106 
/* Return the accel framework's own memory domain.  Buffers belonging to this
 * domain are placeholders translated to real iobuf memory on demand (see
 * accel_memory_domain_translate()). */
struct spdk_memory_domain *
spdk_accel_get_memory_domain(void)
{
	return g_accel_domain;
}
2112 
2113 static struct spdk_accel_module_if *
2114 _module_find_by_name(const char *name)
2115 {
2116 	struct spdk_accel_module_if *accel_module = NULL;
2117 
2118 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
2119 		if (strcmp(name, accel_module->name) == 0) {
2120 			break;
2121 		}
2122 	}
2123 
2124 	return accel_module;
2125 }
2126 
2127 static inline struct spdk_accel_crypto_key *
2128 _accel_crypto_key_get(const char *name)
2129 {
2130 	struct spdk_accel_crypto_key *key;
2131 
2132 	assert(spdk_spin_held(&g_keyring_spin));
2133 
2134 	TAILQ_FOREACH(key, &g_keyring, link) {
2135 		if (strcmp(name, key->param.key_name) == 0) {
2136 			return key;
2137 		}
2138 	}
2139 
2140 	return NULL;
2141 }
2142 
2143 static void
2144 accel_crypto_key_free_mem(struct spdk_accel_crypto_key *key)
2145 {
2146 	if (key->param.hex_key) {
2147 		spdk_memset_s(key->param.hex_key, key->key_size * 2, 0, key->key_size * 2);
2148 		free(key->param.hex_key);
2149 	}
2150 	if (key->param.hex_key2) {
2151 		spdk_memset_s(key->param.hex_key2, key->key2_size * 2, 0, key->key2_size * 2);
2152 		free(key->param.hex_key2);
2153 	}
2154 	free(key->param.tweak_mode);
2155 	free(key->param.key_name);
2156 	free(key->param.cipher);
2157 	if (key->key) {
2158 		spdk_memset_s(key->key, key->key_size, 0, key->key_size);
2159 		free(key->key);
2160 	}
2161 	if (key->key2) {
2162 		spdk_memset_s(key->key2, key->key2_size, 0, key->key2_size);
2163 		free(key->key2);
2164 	}
2165 	free(key);
2166 }
2167 
/* Deinitialize a key in its owning module and free its memory.  "Unsafe" because
 * no locking is performed: the caller is responsible for having already removed
 * the key from g_keyring. */
static void
accel_crypto_key_destroy_unsafe(struct spdk_accel_crypto_key *key)
{
	assert(key->module_if);
	assert(key->module_if->crypto_key_deinit);

	key->module_if->crypto_key_deinit(key);
	accel_crypto_key_free_mem(key);
}
2177 
2178 /*
2179  * This function mitigates a timing side channel which could be caused by using strcmp()
2180  * Please refer to chapter "Mitigating Information Leakage Based on Variable Timing" in
2181  * the article [1] for more details
2182  * [1] https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/secure-coding/mitigate-timing-side-channel-crypto-implementation.html
2183  */
2184 static bool
2185 accel_aes_xts_keys_equal(const char *k1, size_t k1_len, const char *k2, size_t k2_len)
2186 {
2187 	size_t i;
2188 	volatile size_t x = k1_len ^ k2_len;
2189 
2190 	for (i = 0; ((i < k1_len) & (i < k2_len)); i++) {
2191 		x |= k1[i] ^ k2[i];
2192 	}
2193 
2194 	return x == 0;
2195 }
2196 
/* Human-readable tweak mode names, indexed by enum value.  These are the strings
 * matched against the tweak_mode field of the key creation parameters. */
static const char *g_tweak_modes[] = {
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA] = "SIMPLE_LBA",
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_JOIN_NEG_LBA_WITH_LBA] = "JOIN_NEG_LBA_WITH_LBA",
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_INCR_512_FULL_LBA] = "INCR_512_FULL_LBA",
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_INCR_512_UPPER_LBA] = "INCR_512_UPPER_LBA",
};

/* Human-readable cipher names, indexed by enum value */
static const char *g_ciphers[] = {
	[SPDK_ACCEL_CIPHER_AES_CBC] = "AES_CBC",
	[SPDK_ACCEL_CIPHER_AES_XTS] = "AES_XTS",
};
2208 
2209 int
2210 spdk_accel_crypto_key_create(const struct spdk_accel_crypto_key_create_param *param)
2211 {
2212 	struct spdk_accel_module_if *module;
2213 	struct spdk_accel_crypto_key *key;
2214 	size_t hex_key_size, hex_key2_size;
2215 	bool found = false;
2216 	size_t i;
2217 	int rc;
2218 
2219 	if (!param || !param->hex_key || !param->cipher || !param->key_name) {
2220 		return -EINVAL;
2221 	}
2222 
2223 	if (g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module != g_modules_opc[SPDK_ACCEL_OPC_DECRYPT].module) {
2224 		/* hardly ever possible, but let's check and warn the user */
2225 		SPDK_ERRLOG("Different accel modules are used for encryption and decryption\n");
2226 	}
2227 	module = g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module;
2228 
2229 	if (!module) {
2230 		SPDK_ERRLOG("No accel module found assigned for crypto operation\n");
2231 		return -ENOENT;
2232 	}
2233 
2234 	if (!module->crypto_key_init || !module->crypto_supports_cipher) {
2235 		SPDK_ERRLOG("Module %s doesn't support crypto operations\n", module->name);
2236 		return -ENOTSUP;
2237 	}
2238 
2239 	key = calloc(1, sizeof(*key));
2240 	if (!key) {
2241 		return -ENOMEM;
2242 	}
2243 
2244 	key->param.key_name = strdup(param->key_name);
2245 	if (!key->param.key_name) {
2246 		rc = -ENOMEM;
2247 		goto error;
2248 	}
2249 
2250 	for (i = 0; i < SPDK_COUNTOF(g_ciphers); ++i) {
2251 		assert(g_ciphers[i]);
2252 
2253 		if (strncmp(param->cipher, g_ciphers[i], strlen(g_ciphers[i])) == 0) {
2254 			key->cipher = i;
2255 			found = true;
2256 			break;
2257 		}
2258 	}
2259 
2260 	if (!found) {
2261 		SPDK_ERRLOG("Failed to parse cipher\n");
2262 		rc = -EINVAL;
2263 		goto error;
2264 	}
2265 
2266 	key->param.cipher = strdup(param->cipher);
2267 	if (!key->param.cipher) {
2268 		rc = -ENOMEM;
2269 		goto error;
2270 	}
2271 
2272 	hex_key_size = strnlen(param->hex_key, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
2273 	if (hex_key_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
2274 		SPDK_ERRLOG("key1 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
2275 		rc = -EINVAL;
2276 		goto error;
2277 	}
2278 
2279 	if (hex_key_size == 0) {
2280 		SPDK_ERRLOG("key1 size cannot be 0\n");
2281 		rc = -EINVAL;
2282 		goto error;
2283 	}
2284 
2285 	key->param.hex_key = strdup(param->hex_key);
2286 	if (!key->param.hex_key) {
2287 		rc = -ENOMEM;
2288 		goto error;
2289 	}
2290 
2291 	key->key_size = hex_key_size / 2;
2292 	key->key = spdk_unhexlify(key->param.hex_key);
2293 	if (!key->key) {
2294 		SPDK_ERRLOG("Failed to unhexlify key1\n");
2295 		rc = -EINVAL;
2296 		goto error;
2297 	}
2298 
2299 	if (param->hex_key2) {
2300 		hex_key2_size = strnlen(param->hex_key2, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
2301 		if (hex_key2_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
2302 			SPDK_ERRLOG("key2 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
2303 			rc = -EINVAL;
2304 			goto error;
2305 		}
2306 
2307 		if (hex_key2_size == 0) {
2308 			SPDK_ERRLOG("key2 size cannot be 0\n");
2309 			rc = -EINVAL;
2310 			goto error;
2311 		}
2312 
2313 		key->param.hex_key2 = strdup(param->hex_key2);
2314 		if (!key->param.hex_key2) {
2315 			rc = -ENOMEM;
2316 			goto error;
2317 		}
2318 
2319 		key->key2_size = hex_key2_size / 2;
2320 		key->key2 = spdk_unhexlify(key->param.hex_key2);
2321 		if (!key->key2) {
2322 			SPDK_ERRLOG("Failed to unhexlify key2\n");
2323 			rc = -EINVAL;
2324 			goto error;
2325 		}
2326 	}
2327 
2328 	key->tweak_mode = ACCEL_CRYPTO_TWEAK_MODE_DEFAULT;
2329 	if (param->tweak_mode) {
2330 		found = false;
2331 
2332 		key->param.tweak_mode = strdup(param->tweak_mode);
2333 		if (!key->param.tweak_mode) {
2334 			rc = -ENOMEM;
2335 			goto error;
2336 		}
2337 
2338 		for (i = 0; i < SPDK_COUNTOF(g_tweak_modes); ++i) {
2339 			assert(g_tweak_modes[i]);
2340 
2341 			if (strncmp(param->tweak_mode, g_tweak_modes[i], strlen(g_tweak_modes[i])) == 0) {
2342 				key->tweak_mode = i;
2343 				found = true;
2344 				break;
2345 			}
2346 		}
2347 
2348 		if (!found) {
2349 			SPDK_ERRLOG("Failed to parse tweak mode\n");
2350 			rc = -EINVAL;
2351 			goto error;
2352 		}
2353 	}
2354 
2355 	if ((!module->crypto_supports_tweak_mode && key->tweak_mode != ACCEL_CRYPTO_TWEAK_MODE_DEFAULT) ||
2356 	    (module->crypto_supports_tweak_mode && !module->crypto_supports_tweak_mode(key->tweak_mode))) {
2357 		SPDK_ERRLOG("Module %s doesn't support %s tweak mode\n", module->name,
2358 			    g_tweak_modes[key->tweak_mode]);
2359 		rc = -EINVAL;
2360 		goto error;
2361 	}
2362 
2363 	if (!module->crypto_supports_cipher(key->cipher, key->key_size)) {
2364 		SPDK_ERRLOG("Module %s doesn't support %s cipher with %zu key size\n", module->name,
2365 			    g_ciphers[key->cipher], key->key_size);
2366 		rc = -EINVAL;
2367 		goto error;
2368 	}
2369 
2370 	if (key->cipher == SPDK_ACCEL_CIPHER_AES_XTS) {
2371 		if (!key->key2) {
2372 			SPDK_ERRLOG("%s key2 is missing\n", g_ciphers[key->cipher]);
2373 			rc = -EINVAL;
2374 			goto error;
2375 		}
2376 
2377 		if (key->key_size != key->key2_size) {
2378 			SPDK_ERRLOG("%s key size %zu is not equal to key2 size %zu\n", g_ciphers[key->cipher],
2379 				    key->key_size,
2380 				    key->key2_size);
2381 			rc = -EINVAL;
2382 			goto error;
2383 		}
2384 
2385 		if (accel_aes_xts_keys_equal(key->key, key->key_size, key->key2, key->key2_size)) {
2386 			SPDK_ERRLOG("%s identical keys are not secure\n", g_ciphers[key->cipher]);
2387 			rc = -EINVAL;
2388 			goto error;
2389 		}
2390 	}
2391 
2392 	if (key->cipher == SPDK_ACCEL_CIPHER_AES_CBC) {
2393 		if (key->key2_size) {
2394 			SPDK_ERRLOG("%s doesn't use key2\n", g_ciphers[key->cipher]);
2395 			rc = -EINVAL;
2396 			goto error;
2397 		}
2398 	}
2399 
2400 	key->module_if = module;
2401 
2402 	spdk_spin_lock(&g_keyring_spin);
2403 	if (_accel_crypto_key_get(param->key_name)) {
2404 		rc = -EEXIST;
2405 	} else {
2406 		rc = module->crypto_key_init(key);
2407 		if (rc) {
2408 			SPDK_ERRLOG("Module %s failed to initialize crypto key\n", module->name);
2409 		} else {
2410 			TAILQ_INSERT_TAIL(&g_keyring, key, link);
2411 		}
2412 	}
2413 	spdk_spin_unlock(&g_keyring_spin);
2414 
2415 	if (rc) {
2416 		goto error;
2417 	}
2418 
2419 	return 0;
2420 
2421 error:
2422 	accel_crypto_key_free_mem(key);
2423 	return rc;
2424 }
2425 
2426 int
2427 spdk_accel_crypto_key_destroy(struct spdk_accel_crypto_key *key)
2428 {
2429 	if (!key || !key->module_if) {
2430 		return -EINVAL;
2431 	}
2432 
2433 	spdk_spin_lock(&g_keyring_spin);
2434 	if (!_accel_crypto_key_get(key->param.key_name)) {
2435 		spdk_spin_unlock(&g_keyring_spin);
2436 		return -ENOENT;
2437 	}
2438 	TAILQ_REMOVE(&g_keyring, key, link);
2439 	spdk_spin_unlock(&g_keyring_spin);
2440 
2441 	accel_crypto_key_destroy_unsafe(key);
2442 
2443 	return 0;
2444 }
2445 
/* Look up a crypto key by name, taking the keyring lock for the lookup.
 * NOTE(review): the returned pointer is not reference counted — confirm that
 * callers serialize against spdk_accel_crypto_key_destroy(). */
struct spdk_accel_crypto_key *
spdk_accel_crypto_key_get(const char *name)
{
	struct spdk_accel_crypto_key *key;

	spdk_spin_lock(&g_keyring_spin);
	key = _accel_crypto_key_get(name);
	spdk_spin_unlock(&g_keyring_spin);

	return key;
}
2457 
2458 /* Helper function when accel modules register with the framework. */
2459 void
2460 spdk_accel_module_list_add(struct spdk_accel_module_if *accel_module)
2461 {
2462 	struct spdk_accel_module_if *tmp;
2463 
2464 	if (_module_find_by_name(accel_module->name)) {
2465 		SPDK_NOTICELOG("Module %s already registered\n", accel_module->name);
2466 		assert(false);
2467 		return;
2468 	}
2469 
2470 	TAILQ_FOREACH(tmp, &spdk_accel_module_list, tailq) {
2471 		if (accel_module->priority < tmp->priority) {
2472 			break;
2473 		}
2474 	}
2475 
2476 	if (tmp != NULL) {
2477 		TAILQ_INSERT_BEFORE(tmp, accel_module, tailq);
2478 	} else {
2479 		TAILQ_INSERT_TAIL(&spdk_accel_module_list, accel_module, tailq);
2480 	}
2481 }
2482 
2483 /* Framework level channel create callback. */
2484 static int
2485 accel_create_channel(void *io_device, void *ctx_buf)
2486 {
2487 	struct accel_io_channel	*accel_ch = ctx_buf;
2488 	struct spdk_accel_task *accel_task;
2489 	struct spdk_accel_task_aux_data *accel_task_aux;
2490 	struct spdk_accel_sequence *seq;
2491 	struct accel_buffer *buf;
2492 	size_t task_size_aligned;
2493 	uint8_t *task_mem;
2494 	uint32_t i = 0, j;
2495 	int rc;
2496 
2497 	task_size_aligned = SPDK_ALIGN_CEIL(g_max_accel_module_size, SPDK_CACHE_LINE_SIZE);
2498 	accel_ch->task_pool_base = aligned_alloc(SPDK_CACHE_LINE_SIZE,
2499 				   g_opts.task_count * task_size_aligned);
2500 	if (!accel_ch->task_pool_base) {
2501 		return -ENOMEM;
2502 	}
2503 	memset(accel_ch->task_pool_base, 0, g_opts.task_count * task_size_aligned);
2504 
2505 	accel_ch->seq_pool_base = aligned_alloc(SPDK_CACHE_LINE_SIZE,
2506 						g_opts.sequence_count * sizeof(struct spdk_accel_sequence));
2507 	if (accel_ch->seq_pool_base == NULL) {
2508 		goto err;
2509 	}
2510 	memset(accel_ch->seq_pool_base, 0, g_opts.sequence_count * sizeof(struct spdk_accel_sequence));
2511 
2512 	accel_ch->task_aux_data_base = calloc(g_opts.task_count, sizeof(struct spdk_accel_task_aux_data));
2513 	if (accel_ch->task_aux_data_base == NULL) {
2514 		goto err;
2515 	}
2516 
2517 	accel_ch->buf_pool_base = calloc(g_opts.buf_count, sizeof(struct accel_buffer));
2518 	if (accel_ch->buf_pool_base == NULL) {
2519 		goto err;
2520 	}
2521 
2522 	STAILQ_INIT(&accel_ch->task_pool);
2523 	SLIST_INIT(&accel_ch->task_aux_data_pool);
2524 	SLIST_INIT(&accel_ch->seq_pool);
2525 	SLIST_INIT(&accel_ch->buf_pool);
2526 
2527 	task_mem = accel_ch->task_pool_base;
2528 	for (i = 0; i < g_opts.task_count; i++) {
2529 		accel_task = (struct spdk_accel_task *)task_mem;
2530 		accel_task->aux = NULL;
2531 		STAILQ_INSERT_TAIL(&accel_ch->task_pool, accel_task, link);
2532 		task_mem += task_size_aligned;
2533 		accel_task_aux = &accel_ch->task_aux_data_base[i];
2534 		SLIST_INSERT_HEAD(&accel_ch->task_aux_data_pool, accel_task_aux, link);
2535 	}
2536 	for (i = 0; i < g_opts.sequence_count; i++) {
2537 		seq = &accel_ch->seq_pool_base[i];
2538 		SLIST_INSERT_HEAD(&accel_ch->seq_pool, seq, link);
2539 	}
2540 	for (i = 0; i < g_opts.buf_count; i++) {
2541 		buf = &accel_ch->buf_pool_base[i];
2542 		SLIST_INSERT_HEAD(&accel_ch->buf_pool, buf, link);
2543 	}
2544 
2545 	/* Assign modules and get IO channels for each */
2546 	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
2547 		accel_ch->module_ch[i] = g_modules_opc[i].module->get_io_channel();
2548 		/* This can happen if idxd runs out of channels. */
2549 		if (accel_ch->module_ch[i] == NULL) {
2550 			SPDK_ERRLOG("Module %s failed to get io channel\n", g_modules_opc[i].module->name);
2551 			goto err;
2552 		}
2553 	}
2554 
2555 	if (g_accel_driver != NULL) {
2556 		accel_ch->driver_channel = g_accel_driver->get_io_channel();
2557 		if (accel_ch->driver_channel == NULL) {
2558 			SPDK_ERRLOG("Failed to get driver's IO channel\n");
2559 			goto err;
2560 		}
2561 	}
2562 
2563 	rc = spdk_iobuf_channel_init(&accel_ch->iobuf, "accel", g_opts.small_cache_size,
2564 				     g_opts.large_cache_size);
2565 	if (rc != 0) {
2566 		SPDK_ERRLOG("Failed to initialize iobuf accel channel\n");
2567 		goto err;
2568 	}
2569 
2570 	return 0;
2571 err:
2572 	if (accel_ch->driver_channel != NULL) {
2573 		spdk_put_io_channel(accel_ch->driver_channel);
2574 	}
2575 	for (j = 0; j < i; j++) {
2576 		spdk_put_io_channel(accel_ch->module_ch[j]);
2577 	}
2578 	free(accel_ch->task_pool_base);
2579 	free(accel_ch->task_aux_data_base);
2580 	free(accel_ch->seq_pool_base);
2581 	free(accel_ch->buf_pool_base);
2582 
2583 	return -ENOMEM;
2584 }
2585 
2586 static void
2587 accel_add_stats(struct accel_stats *total, struct accel_stats *stats)
2588 {
2589 	int i;
2590 
2591 	total->sequence_executed += stats->sequence_executed;
2592 	total->sequence_failed += stats->sequence_failed;
2593 	total->retry.task += stats->retry.task;
2594 	total->retry.sequence += stats->retry.sequence;
2595 	total->retry.iobuf += stats->retry.iobuf;
2596 	total->retry.bufdesc += stats->retry.bufdesc;
2597 	for (i = 0; i < SPDK_ACCEL_OPC_LAST; ++i) {
2598 		total->operations[i].executed += stats->operations[i].executed;
2599 		total->operations[i].failed += stats->operations[i].failed;
2600 		total->operations[i].num_bytes += stats->operations[i].num_bytes;
2601 	}
2602 }
2603 
/* Framework level channel destroy callback.  Tears down everything set up by
 * accel_create_channel(): iobuf cache, driver/module channels, and the task,
 * sequence and buffer pools. */
static void
accel_destroy_channel(void *io_device, void *ctx_buf)
{
	struct accel_io_channel	*accel_ch = ctx_buf;
	int i;

	spdk_iobuf_channel_fini(&accel_ch->iobuf);

	/* The driver channel only exists when a platform driver is registered */
	if (accel_ch->driver_channel != NULL) {
		spdk_put_io_channel(accel_ch->driver_channel);
	}

	/* Drop the reference on each opcode's module channel */
	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
		assert(accel_ch->module_ch[i] != NULL);
		spdk_put_io_channel(accel_ch->module_ch[i]);
		accel_ch->module_ch[i] = NULL;
	}

	/* Update global stats to make sure channel's stats aren't lost after a channel is gone */
	spdk_spin_lock(&g_stats_lock);
	accel_add_stats(&g_stats, &accel_ch->stats);
	spdk_spin_unlock(&g_stats_lock);

	free(accel_ch->task_pool_base);
	free(accel_ch->task_aux_data_base);
	free(accel_ch->seq_pool_base);
	free(accel_ch->buf_pool_base);
}
2633 
/* Get an accel framework IO channel for the calling thread.  The framework
 * registers itself as an io_device using the address of spdk_accel_module_list
 * as its unique identifier (see spdk_accel_initialize()). */
struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(&spdk_accel_module_list);
}
2639 
/* Initialize every registered accel module.  Modules whose init fails are removed
 * from the list: -ENODEV (no devices for the module) is treated as benign, while
 * for any other failure the FIRST such error code is remembered and returned. */
static int
accel_module_initialize(void)
{
	struct spdk_accel_module_if *accel_module, *tmp_module;
	int rc = 0, module_rc;

	TAILQ_FOREACH_SAFE(accel_module, &spdk_accel_module_list, tailq, tmp_module) {
		module_rc = accel_module->module_init();
		if (module_rc) {
			/* Failed modules are dropped so they're never assigned opcodes */
			TAILQ_REMOVE(&spdk_accel_module_list, accel_module, tailq);
			if (module_rc == -ENODEV) {
				SPDK_NOTICELOG("No devices for module %s, skipping\n", accel_module->name);
			} else if (!rc) {
				/* Only the first non-ENODEV error is reported to the caller */
				SPDK_ERRLOG("Module %s initialization failed with %d\n", accel_module->name, module_rc);
				rc = module_rc;
			}
			continue;
		}

		SPDK_DEBUGLOG(accel, "Module %s initialized.\n", accel_module->name);
	}

	return rc;
}
2664 
2665 static void
2666 accel_module_init_opcode(enum spdk_accel_opcode opcode)
2667 {
2668 	struct accel_module *module = &g_modules_opc[opcode];
2669 	struct spdk_accel_module_if *module_if = module->module;
2670 
2671 	if (module_if->get_memory_domains != NULL) {
2672 		module->supports_memory_domains = module_if->get_memory_domains(NULL, 0) > 0;
2673 	}
2674 }
2675 
/* Translation callback for the accel memory domain.  Addresses in this domain are
 * placeholders based at ACCEL_BUFFER_BASE (see the asserts below); the backing
 * memory is taken from the channel's iobuf pool at translation time. */
static int
accel_memory_domain_translate(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			      struct spdk_memory_domain *dst_domain, struct spdk_memory_domain_translation_ctx *dst_domain_ctx,
			      void *addr, size_t len, struct spdk_memory_domain_translation_result *result)
{
	struct accel_buffer *buf = src_domain_ctx;

	SPDK_DEBUGLOG(accel, "translate addr %p, len %zu\n", addr, len);

	/* Only accel-domain -> system-domain translations of not-yet-backed buffers
	 * are expected here */
	assert(g_accel_domain == src_domain);
	assert(spdk_memory_domain_get_system_domain() == dst_domain);
	assert(buf->buf == NULL);
	assert(addr == ACCEL_BUFFER_BASE);
	assert(len == buf->len);

	buf->buf = spdk_iobuf_get(&buf->ch->iobuf, buf->len, NULL, NULL);
	if (spdk_unlikely(buf->buf == NULL)) {
		/* iobuf pool exhausted; caller is expected to retry later */
		return -ENOMEM;
	}

	/* The buffer is always returned as a single contiguous iovec */
	result->iov_count = 1;
	result->iov.iov_base = buf->buf;
	result->iov.iov_len = buf->len;
	SPDK_DEBUGLOG(accel, "translated addr %p\n", result->iov.iov_base);
	return 0;
}
2702 
/* Invalidation callback for the accel memory domain: returns the iobuf memory
 * backing an accel buffer to the channel's pool, undoing a previous translation. */
static void
accel_memory_domain_invalidate(struct spdk_memory_domain *domain, void *domain_ctx,
			       struct iovec *iov, uint32_t iovcnt)
{
	struct accel_buffer *buf = domain_ctx;

	SPDK_DEBUGLOG(accel, "invalidate addr %p, len %zu\n", iov[0].iov_base, iov[0].iov_len);

	/* Must exactly match what accel_memory_domain_translate() handed out */
	assert(g_accel_domain == domain);
	assert(iovcnt == 1);
	assert(buf->buf != NULL);
	assert(iov[0].iov_base == buf->buf);
	assert(iov[0].iov_len == buf->len);

	spdk_iobuf_put(&buf->ch->iobuf, buf->buf, buf->len);
	buf->buf = NULL;
}
2720 
/* Initialize the accel framework: register the io_device, create the accel memory
 * domain, initialize all modules and the optional driver, assign a module to each
 * opcode (honoring user overrides), and register the iobuf pool.
 * NOTE(review): error paths below return without unregistering the io_device or
 * destroying the memory domain — confirm that spdk_accel_finish()/teardown copes
 * with a partially initialized framework. */
int
spdk_accel_initialize(void)
{
	enum spdk_accel_opcode op;
	struct spdk_accel_module_if *accel_module = NULL;
	int rc;

	/*
	 * We need a unique identifier for the accel framework, so use the
	 * spdk_accel_module_list address for this purpose.
	 */
	spdk_io_device_register(&spdk_accel_module_list, accel_create_channel, accel_destroy_channel,
				sizeof(struct accel_io_channel), "accel");

	spdk_spin_init(&g_keyring_spin);
	spdk_spin_init(&g_stats_lock);

	rc = spdk_memory_domain_create(&g_accel_domain, SPDK_DMA_DEVICE_TYPE_ACCEL, NULL,
				       "SPDK_ACCEL_DMA_DEVICE");
	if (rc != 0) {
		SPDK_ERRLOG("Failed to create accel memory domain\n");
		return rc;
	}

	spdk_memory_domain_set_translation(g_accel_domain, accel_memory_domain_translate);
	spdk_memory_domain_set_invalidate(g_accel_domain, accel_memory_domain_invalidate);

	g_modules_started = true;
	rc = accel_module_initialize();
	if (rc) {
		return rc;
	}

	if (g_accel_driver != NULL && g_accel_driver->init != NULL) {
		rc = g_accel_driver->init();
		if (rc != 0) {
			SPDK_ERRLOG("Failed to initialize driver %s: %s\n", g_accel_driver->name,
				    spdk_strerror(-rc));
			return rc;
		}
	}

	/* The module list is order by priority, with the highest priority modules being at the end
	 * of the list.  The software module should be somewhere at the beginning of the list,
	 * before all HW modules.
	 * NOTE: all opcodes must be supported by software in the event that no HW modules are
	 * initialized to support the operation.
	 */
	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
			if (accel_module->supports_opcode(op)) {
				/* Later (higher priority) modules overwrite earlier ones */
				g_modules_opc[op].module = accel_module;
				SPDK_DEBUGLOG(accel, "OPC 0x%x now assigned to %s\n", op, accel_module->name);
			}
		}

		/* Track the largest per-task context any module requires */
		if (accel_module->get_ctx_size != NULL) {
			g_max_accel_module_size = spdk_max(g_max_accel_module_size,
							   accel_module->get_ctx_size());
		}
	}

	/* Now lets check for overrides and apply all that exist */
	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
		if (g_modules_opc_override[op] != NULL) {
			accel_module = _module_find_by_name(g_modules_opc_override[op]);
			if (accel_module == NULL) {
				SPDK_ERRLOG("Invalid module name of %s\n", g_modules_opc_override[op]);
				return -EINVAL;
			}
			if (accel_module->supports_opcode(op) == false) {
				SPDK_ERRLOG("Module %s does not support op code %d\n", accel_module->name, op);
				return -EINVAL;
			}
			g_modules_opc[op].module = accel_module;
		}
	}

	/* Crypto keys are initialized through the ENCRYPT module only, so both
	 * directions must be handled by the same module */
	if (g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module != g_modules_opc[SPDK_ACCEL_OPC_DECRYPT].module) {
		SPDK_ERRLOG("Different accel modules are assigned to encrypt and decrypt operations");
		return -EINVAL;
	}

	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
		assert(g_modules_opc[op].module != NULL);
		accel_module_init_opcode(op);
	}

	rc = spdk_iobuf_register_module("accel");
	if (rc != 0) {
		SPDK_ERRLOG("Failed to register accel iobuf module\n");
		return rc;
	}

	return 0;
}
2817 
2818 static void
2819 accel_module_finish_cb(void)
2820 {
2821 	spdk_accel_fini_cb cb_fn = g_fini_cb_fn;
2822 
2823 	cb_fn(g_fini_cb_arg);
2824 	g_fini_cb_fn = NULL;
2825 	g_fini_cb_arg = NULL;
2826 }
2827 
/* Emit an "accel_assign_opc" RPC object recording that @opc_str was explicitly
 * assigned to @module_str, so the override is replayed on config load. */
static void
accel_write_overridden_opc(struct spdk_json_write_ctx *w, const char *opc_str,
			   const char *module_str)
{
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "accel_assign_opc");
	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_string(w, "opname", opc_str);
	spdk_json_write_named_string(w, "module", module_str);
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);
}
2840 
/* Write a key's creation parameters as named JSON fields, without any surrounding
 * object.  Key material is emitted in the hex form supplied at creation time;
 * optional fields (key2, tweak_mode) are only written when present. */
static void
__accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
{
	spdk_json_write_named_string(w, "name", key->param.key_name);
	spdk_json_write_named_string(w, "cipher", key->param.cipher);
	spdk_json_write_named_string(w, "key", key->param.hex_key);
	if (key->param.hex_key2) {
		spdk_json_write_named_string(w, "key2", key->param.hex_key2);
	}

	if (key->param.tweak_mode) {
		spdk_json_write_named_string(w, "tweak_mode", key->param.tweak_mode);
	}
}
2855 
/* Dump a single key's parameters wrapped in their own JSON object. */
void
_accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
{
	spdk_json_write_object_begin(w);
	__accel_crypto_key_dump_param(w, key);
	spdk_json_write_object_end(w);
}
2863 
/* Emit an "accel_crypto_key_create" RPC object for @key, so the key is recreated
 * when the saved configuration is replayed. */
static void
_accel_crypto_key_write_config_json(struct spdk_json_write_ctx *w,
				    struct spdk_accel_crypto_key *key)
{
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "accel_crypto_key_create");
	spdk_json_write_named_object_begin(w, "params");
	__accel_crypto_key_dump_param(w, key);
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);
}
2875 
2876 static void
2877 accel_write_options(struct spdk_json_write_ctx *w)
2878 {
2879 	spdk_json_write_object_begin(w);
2880 	spdk_json_write_named_string(w, "method", "accel_set_options");
2881 	spdk_json_write_named_object_begin(w, "params");
2882 	spdk_json_write_named_uint32(w, "small_cache_size", g_opts.small_cache_size);
2883 	spdk_json_write_named_uint32(w, "large_cache_size", g_opts.large_cache_size);
2884 	spdk_json_write_named_uint32(w, "task_count", g_opts.task_count);
2885 	spdk_json_write_named_uint32(w, "sequence_count", g_opts.sequence_count);
2886 	spdk_json_write_named_uint32(w, "buf_count", g_opts.buf_count);
2887 	spdk_json_write_object_end(w);
2888 	spdk_json_write_object_end(w);
2889 }
2890 
2891 static void
2892 _accel_crypto_keys_write_config_json(struct spdk_json_write_ctx *w, bool full_dump)
2893 {
2894 	struct spdk_accel_crypto_key *key;
2895 
2896 	spdk_spin_lock(&g_keyring_spin);
2897 	TAILQ_FOREACH(key, &g_keyring, link) {
2898 		if (full_dump) {
2899 			_accel_crypto_key_write_config_json(w, key);
2900 		} else {
2901 			_accel_crypto_key_dump_param(w, key);
2902 		}
2903 	}
2904 	spdk_spin_unlock(&g_keyring_spin);
2905 }
2906 
2907 void
2908 _accel_crypto_keys_dump_param(struct spdk_json_write_ctx *w)
2909 {
2910 	_accel_crypto_keys_write_config_json(w, false);
2911 }
2912 
2913 void
2914 spdk_accel_write_config_json(struct spdk_json_write_ctx *w)
2915 {
2916 	struct spdk_accel_module_if *accel_module;
2917 	int i;
2918 
2919 	spdk_json_write_array_begin(w);
2920 	accel_write_options(w);
2921 
2922 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
2923 		if (accel_module->write_config_json) {
2924 			accel_module->write_config_json(w);
2925 		}
2926 	}
2927 	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
2928 		if (g_modules_opc_override[i]) {
2929 			accel_write_overridden_opc(w, g_opcode_strings[i], g_modules_opc_override[i]);
2930 		}
2931 	}
2932 
2933 	_accel_crypto_keys_write_config_json(w, true);
2934 
2935 	spdk_json_write_array_end(w);
2936 }
2937 
/* Finish the accel modules one at a time.  The current position in the
 * module list is kept in the global g_accel_module; each module's
 * module_fini callback is expected to call spdk_accel_module_finish()
 * again when it is done, advancing the iteration.  Once the list is
 * exhausted, framework-level resources are released and the user's
 * finish callback is invoked via accel_module_finish_cb(). */
void
spdk_accel_module_finish(void)
{
	/* First call starts at the head of the list; subsequent calls advance. */
	if (!g_accel_module) {
		g_accel_module = TAILQ_FIRST(&spdk_accel_module_list);
	} else {
		g_accel_module = TAILQ_NEXT(g_accel_module, tailq);
	}

	/* All modules finished: tear down driver and framework state. */
	if (!g_accel_module) {
		if (g_accel_driver != NULL && g_accel_driver->fini != NULL) {
			g_accel_driver->fini();
		}

		spdk_spin_destroy(&g_keyring_spin);
		spdk_spin_destroy(&g_stats_lock);
		if (g_accel_domain) {
			spdk_memory_domain_destroy(g_accel_domain);
			g_accel_domain = NULL;
		}
		accel_module_finish_cb();
		return;
	}

	/* Defer module_fini via a thread message to unwind the stack; modules
	 * without a fini callback are skipped by recursing immediately. */
	if (g_accel_module->module_fini) {
		spdk_thread_send_msg(spdk_get_thread(), g_accel_module->module_fini, NULL);
	} else {
		spdk_accel_module_finish();
	}
}
2968 
2969 static void
2970 accel_io_device_unregister_cb(void *io_device)
2971 {
2972 	struct spdk_accel_crypto_key *key, *key_tmp;
2973 	enum spdk_accel_opcode op;
2974 
2975 	spdk_spin_lock(&g_keyring_spin);
2976 	TAILQ_FOREACH_SAFE(key, &g_keyring, link, key_tmp) {
2977 		accel_crypto_key_destroy_unsafe(key);
2978 	}
2979 	spdk_spin_unlock(&g_keyring_spin);
2980 
2981 	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
2982 		if (g_modules_opc_override[op] != NULL) {
2983 			free(g_modules_opc_override[op]);
2984 			g_modules_opc_override[op] = NULL;
2985 		}
2986 		g_modules_opc[op].module = NULL;
2987 	}
2988 
2989 	spdk_accel_module_finish();
2990 }
2991 
2992 void
2993 spdk_accel_finish(spdk_accel_fini_cb cb_fn, void *cb_arg)
2994 {
2995 	assert(cb_fn != NULL);
2996 
2997 	g_fini_cb_fn = cb_fn;
2998 	g_fini_cb_arg = cb_arg;
2999 
3000 	spdk_io_device_unregister(&spdk_accel_module_list, accel_io_device_unregister_cb);
3001 }
3002 
3003 static struct spdk_accel_driver *
3004 accel_find_driver(const char *name)
3005 {
3006 	struct spdk_accel_driver *driver;
3007 
3008 	TAILQ_FOREACH(driver, &g_accel_drivers, tailq) {
3009 		if (strcmp(driver->name, name) == 0) {
3010 			return driver;
3011 		}
3012 	}
3013 
3014 	return NULL;
3015 }
3016 
3017 int
3018 spdk_accel_set_driver(const char *name)
3019 {
3020 	struct spdk_accel_driver *driver;
3021 
3022 	driver = accel_find_driver(name);
3023 	if (driver == NULL) {
3024 		SPDK_ERRLOG("Couldn't find driver named '%s'\n", name);
3025 		return -ENODEV;
3026 	}
3027 
3028 	g_accel_driver = driver;
3029 
3030 	return 0;
3031 }
3032 
3033 void
3034 spdk_accel_driver_register(struct spdk_accel_driver *driver)
3035 {
3036 	if (accel_find_driver(driver->name)) {
3037 		SPDK_ERRLOG("Driver named '%s' has already been registered\n", driver->name);
3038 		assert(0);
3039 		return;
3040 	}
3041 
3042 	TAILQ_INSERT_TAIL(&g_accel_drivers, driver, tailq);
3043 }
3044 
3045 int
3046 spdk_accel_set_opts(const struct spdk_accel_opts *opts)
3047 {
3048 	if (!opts) {
3049 		SPDK_ERRLOG("opts cannot be NULL\n");
3050 		return -1;
3051 	}
3052 
3053 	if (!opts->opts_size) {
3054 		SPDK_ERRLOG("opts_size inside opts cannot be zero value\n");
3055 		return -1;
3056 	}
3057 
3058 #define SET_FIELD(field) \
3059         if (offsetof(struct spdk_accel_opts, field) + sizeof(opts->field) <= opts->opts_size) { \
3060                 g_opts.field = opts->field; \
3061         } \
3062 
3063 	SET_FIELD(small_cache_size);
3064 	SET_FIELD(large_cache_size);
3065 	SET_FIELD(task_count);
3066 	SET_FIELD(sequence_count);
3067 	SET_FIELD(buf_count);
3068 
3069 	g_opts.opts_size = opts->opts_size;
3070 
3071 #undef SET_FIELD
3072 
3073 	return 0;
3074 }
3075 
3076 void
3077 spdk_accel_get_opts(struct spdk_accel_opts *opts, size_t opts_size)
3078 {
3079 	if (!opts) {
3080 		SPDK_ERRLOG("opts should not be NULL\n");
3081 		return;
3082 	}
3083 
3084 	if (!opts_size) {
3085 		SPDK_ERRLOG("opts_size should not be zero value\n");
3086 		return;
3087 	}
3088 
3089 	opts->opts_size = opts_size;
3090 
3091 #define SET_FIELD(field) \
3092 	if (offsetof(struct spdk_accel_opts, field) + sizeof(opts->field) <= opts_size) { \
3093 		opts->field = g_opts.field; \
3094 	} \
3095 
3096 	SET_FIELD(small_cache_size);
3097 	SET_FIELD(large_cache_size);
3098 	SET_FIELD(task_count);
3099 	SET_FIELD(sequence_count);
3100 	SET_FIELD(buf_count);
3101 
3102 #undef SET_FIELD
3103 
3104 	/* Do not remove this statement, you should always update this statement when you adding a new field,
3105 	 * and do not forget to add the SET_FIELD statement for your added field. */
3106 	SPDK_STATIC_ASSERT(sizeof(struct spdk_accel_opts) == 28, "Incorrect size");
3107 }
3108 
/* Context carried through the spdk_for_each_channel() iteration used by
 * accel_get_stats() to aggregate statistics across all channels. */
struct accel_get_stats_ctx {
	struct accel_stats	stats;		/* accumulated totals */
	accel_get_stats_cb	cb_fn;		/* invoked when the iteration completes */
	void			*cb_arg;	/* opaque argument passed to cb_fn */
};
3114 
3115 static void
3116 accel_get_channel_stats_done(struct spdk_io_channel_iter *iter, int status)
3117 {
3118 	struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);
3119 
3120 	ctx->cb_fn(&ctx->stats, ctx->cb_arg);
3121 	free(ctx);
3122 }
3123 
3124 static void
3125 accel_get_channel_stats(struct spdk_io_channel_iter *iter)
3126 {
3127 	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(iter);
3128 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
3129 	struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);
3130 
3131 	accel_add_stats(&ctx->stats, &accel_ch->stats);
3132 	spdk_for_each_channel_continue(iter, 0);
3133 }
3134 
3135 int
3136 accel_get_stats(accel_get_stats_cb cb_fn, void *cb_arg)
3137 {
3138 	struct accel_get_stats_ctx *ctx;
3139 
3140 	ctx = calloc(1, sizeof(*ctx));
3141 	if (ctx == NULL) {
3142 		return -ENOMEM;
3143 	}
3144 
3145 	spdk_spin_lock(&g_stats_lock);
3146 	accel_add_stats(&ctx->stats, &g_stats);
3147 	spdk_spin_unlock(&g_stats_lock);
3148 
3149 	ctx->cb_fn = cb_fn;
3150 	ctx->cb_arg = cb_arg;
3151 
3152 	spdk_for_each_channel(&spdk_accel_module_list, accel_get_channel_stats, ctx,
3153 			      accel_get_channel_stats_done);
3154 
3155 	return 0;
3156 }
3157 
3158 void
3159 spdk_accel_get_opcode_stats(struct spdk_io_channel *ch, enum spdk_accel_opcode opcode,
3160 			    struct spdk_accel_opcode_stats *stats, size_t size)
3161 {
3162 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
3163 
3164 #define FIELD_OK(field) \
3165 	offsetof(struct spdk_accel_opcode_stats, field) + sizeof(stats->field) <= size
3166 
3167 #define SET_FIELD(field, value) \
3168 	if (FIELD_OK(field)) { \
3169 		stats->field = value; \
3170 	}
3171 
3172 	SET_FIELD(executed, accel_ch->stats.operations[opcode].executed);
3173 	SET_FIELD(failed, accel_ch->stats.operations[opcode].failed);
3174 	SET_FIELD(num_bytes, accel_ch->stats.operations[opcode].num_bytes);
3175 
3176 #undef FIELD_OK
3177 #undef SET_FIELD
3178 }
3179 
3180 uint8_t
3181 spdk_accel_get_buf_align(enum spdk_accel_opcode opcode,
3182 			 const struct spdk_accel_operation_exec_ctx *ctx)
3183 {
3184 	struct spdk_accel_module_if *module = g_modules_opc[opcode].module;
3185 	struct spdk_accel_opcode_info modinfo = {}, drvinfo = {};
3186 
3187 	if (g_accel_driver != NULL && g_accel_driver->get_operation_info != NULL) {
3188 		g_accel_driver->get_operation_info(opcode, ctx, &drvinfo);
3189 	}
3190 
3191 	if (module->get_operation_info != NULL) {
3192 		module->get_operation_info(opcode, ctx, &modinfo);
3193 	}
3194 
3195 	/* If a driver is set, it'll execute most of the operations, while the rest will usually
3196 	 * fall back to accel_sw, which doesn't have any alignment requiremenets.  However, to be
3197 	 * extra safe, return the max(driver, module) if a driver delegates some operations to a
3198 	 * hardware module. */
3199 	return spdk_max(modinfo.required_alignment, drvinfo.required_alignment);
3200 }
3201 
3202 struct spdk_accel_module_if *
3203 spdk_accel_get_module(const char *name)
3204 {
3205 	struct spdk_accel_module_if *module;
3206 
3207 	TAILQ_FOREACH(module, &spdk_accel_module_list, tailq) {
3208 		if (strcmp(module->name, name) == 0) {
3209 			return module;
3210 		}
3211 	}
3212 
3213 	return NULL;
3214 }
3215 
3216 SPDK_LOG_REGISTER_COMPONENT(accel)
3217