/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2022 Intel Corporation.
 *   Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES
 *   All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/accel_module.h"
#include "accel_internal.h"

#include "spdk/env.h"
#include "spdk/likely.h"
#include "spdk/log.h"
#include "spdk/thread.h"
#include "spdk/json.h"
#include "spdk/crc32.h"
#include "spdk/util.h"
#include "spdk/xor.h"

#ifdef SPDK_CONFIG_ISAL
#include "../isa-l/include/igzip_lib.h"
#ifdef SPDK_CONFIG_ISAL_CRYPTO
#include "../isa-l-crypto/include/aes_xts.h"
#endif
#endif

#define ACCEL_AES_XTS_128_KEY_SIZE 16
#define ACCEL_AES_XTS_256_KEY_SIZE 32
#define ACCEL_AES_XTS "AES_XTS"
/* Per the AES-XTS spec, a data unit may not exceed 2^20 blocks of 128 bits
 * (16 bytes) each, i.e. 2^20 * 2^4 = 2^24 bytes. */
#define ACCEL_AES_XTS_MAX_BLOCK_SIZE (1 << 24)

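/*
 * Per-channel context. The ISA-L compression stream and inflate state live in
 * the channel so each channel can run compress/decompress jobs independently,
 * while tasks_to_complete holds finished tasks until the completion poller
 * runs (see accel_comp_poll below).
 */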
struct sw_accel_io_channel {
	/* for ISAL */
#ifdef SPDK_CONFIG_ISAL
	struct isal_zstream		stream;
	struct inflate_state		state;
#endif
	struct spdk_poller		*completion_poller;
	TAILQ_HEAD(, spdk_accel_task)	tasks_to_complete;
};

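/*
 * Matches the prototype of the ISA-L XTS_AES_{128,256}_{enc,dec} routines
 * assigned at key-init time below: per the ISA-L convention, k2 is the tweak
 * key, k1 the data key, tweak the initial 128-bit tweak, and lba_size the
 * number of bytes to process in the data unit.
 */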
typedef void (*sw_accel_crypto_op)(uint8_t *k2, uint8_t *k1, uint8_t *tweak, uint64_t lba_size,
				   const uint8_t *src, uint8_t *dst);

struct sw_accel_crypto_key_data {
	sw_accel_crypto_op encrypt;
	sw_accel_crypto_op decrypt;
};

static struct spdk_accel_module_if g_sw_module;

static void sw_accel_crypto_key_deinit(struct spdk_accel_crypto_key *_key);
static int sw_accel_crypto_key_init(struct spdk_accel_crypto_key *key);

/* Post SW completions to a list and complete them from a poller, rather than
 * on the caller's stack, since the completion callback will likely submit
 * another task. */
inline static void
_add_to_comp_list(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task, int status)
{
	accel_task->status = status;
	TAILQ_INSERT_TAIL(&sw_ch->tasks_to_complete, accel_task, link);
}

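/*
 * Every opcode is supported in software; within the accel framework this
 * module acts as the fallback used when no hardware module has claimed a
 * given opcode.
 */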
static bool
sw_accel_supports_opcode(enum accel_opcode opc)
{
	switch (opc) {
	case ACCEL_OPC_COPY:
	case ACCEL_OPC_FILL:
	case ACCEL_OPC_DUALCAST:
	case ACCEL_OPC_COMPARE:
	case ACCEL_OPC_CRC32C:
	case ACCEL_OPC_COPY_CRC32C:
	case ACCEL_OPC_COMPRESS:
	case ACCEL_OPC_DECOMPRESS:
	case ACCEL_OPC_ENCRYPT:
	case ACCEL_OPC_DECRYPT:
	case ACCEL_OPC_XOR:
		return true;
	default:
		return false;
	}
}

static int
_sw_accel_dualcast_iovs(struct iovec *dst_iovs, uint32_t dst_iovcnt,
			struct iovec *dst2_iovs, uint32_t dst2_iovcnt,
			struct iovec *src_iovs, uint32_t src_iovcnt)
{
	if (spdk_unlikely(dst_iovcnt != 1 || dst2_iovcnt != 1 || src_iovcnt != 1)) {
		return -EINVAL;
	}

	if (spdk_unlikely(dst_iovs[0].iov_len != src_iovs[0].iov_len ||
			  dst_iovs[0].iov_len != dst2_iovs[0].iov_len)) {
		return -EINVAL;
	}

	memcpy(dst_iovs[0].iov_base, src_iovs[0].iov_base, dst_iovs[0].iov_len);
	memcpy(dst2_iovs[0].iov_base, src_iovs[0].iov_base, dst_iovs[0].iov_len);

	return 0;
}

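/*
 * Copy between arbitrary scatter-gather lists: spdk_ioviter walks the source
 * and destination iovec arrays in lockstep, yielding on each iteration the
 * largest chunk that is contiguous in both. Unlike the single-iovec dualcast
 * path above, this supports any iovcnt.
 */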
static void
_sw_accel_copy_iovs(struct iovec *dst_iovs, uint32_t dst_iovcnt,
		    struct iovec *src_iovs, uint32_t src_iovcnt)
{
	struct spdk_ioviter iter;
	void *src, *dst;
	size_t len;

	for (len = spdk_ioviter_first(&iter, src_iovs, src_iovcnt,
				      dst_iovs, dst_iovcnt, &src, &dst);
	     len != 0;
	     len = spdk_ioviter_next(&iter, &src, &dst)) {
		memcpy(dst, src, len);
	}
}

static int
_sw_accel_compare(struct iovec *src_iovs, uint32_t src_iovcnt,
		  struct iovec *src2_iovs, uint32_t src2_iovcnt)
{
	if (spdk_unlikely(src_iovcnt != 1 || src2_iovcnt != 1)) {
		return -EINVAL;
	}

	if (spdk_unlikely(src_iovs[0].iov_len != src2_iovs[0].iov_len)) {
		return -EINVAL;
	}

	return memcmp(src_iovs[0].iov_base, src2_iovs[0].iov_base, src_iovs[0].iov_len);
}

static int
_sw_accel_fill(struct iovec *iovs, uint32_t iovcnt, uint8_t fill)
{
	void *dst;
	size_t nbytes;

	if (spdk_unlikely(iovcnt != 1)) {
		return -EINVAL;
	}

	dst = iovs[0].iov_base;
	nbytes = iovs[0].iov_len;

	memset(dst, fill, nbytes);

	return 0;
}

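/*
 * The caller's seed is bit-inverted before being handed to
 * spdk_crc32c_iov_update(), presumably so a caller-facing seed of 0 maps to
 * the customary CRC-32C initial value of 0xFFFFFFFF.
 */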
static void
_sw_accel_crc32cv(uint32_t *crc_dst, struct iovec *iov, uint32_t iovcnt, uint32_t seed)
{
	*crc_dst = spdk_crc32c_iov_update(iov, iovcnt, ~seed);
}

static int
_sw_accel_compress(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
#ifdef SPDK_CONFIG_ISAL
	size_t last_seglen = accel_task->s.iovs[accel_task->s.iovcnt - 1].iov_len;
	struct iovec *siov = accel_task->s.iovs;
	struct iovec *diov = accel_task->d.iovs;
	size_t remaining;
	uint32_t i, s = 0, d = 0;
	int rc = 0;

	remaining = 0;
	for (i = 0; i < accel_task->s.iovcnt; ++i) {
		remaining += accel_task->s.iovs[i].iov_len;
	}

	isal_deflate_reset(&sw_ch->stream);
	sw_ch->stream.end_of_stream = 0;
	sw_ch->stream.next_out = diov[d].iov_base;
	sw_ch->stream.avail_out = diov[d].iov_len;
	sw_ch->stream.next_in = siov[s].iov_base;
	sw_ch->stream.avail_in = siov[s].iov_len;

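	/*
	 * Drive isal_deflate() across both scatter-gather lists: advance the
	 * dst/src iovecs as ISA-L drains them, set end_of_stream once the
	 * remaining input fits in the final source segment, and loop until all
	 * input is consumed and the compressed output has been flushed.
	 */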
	do {
		/* if isal has exhausted the current dst iovec, move to the next
		 * one if there is one */
		if (sw_ch->stream.avail_out == 0) {
			if (++d < accel_task->d.iovcnt) {
				sw_ch->stream.next_out = diov[d].iov_base;
				sw_ch->stream.avail_out = diov[d].iov_len;
				assert(sw_ch->stream.avail_out > 0);
			} else {
				/* we have no avail_out but also no more iovecs left so this is
				 * the case where either the output buffer was a perfect fit
				 * or not enough was provided.  Check the ISAL state to determine
				 * which. */
				if (sw_ch->stream.internal_state.state != ZSTATE_END) {
					SPDK_ERRLOG("Not enough destination buffer provided.\n");
					rc = -ENOMEM;
				}
				break;
			}
		}

		/* if isal has exhausted the current src iovec, move to the next
		 * one if there is one */
		if (sw_ch->stream.avail_in == 0 && ((s + 1) < accel_task->s.iovcnt)) {
			s++;
			sw_ch->stream.next_in = siov[s].iov_base;
			sw_ch->stream.avail_in = siov[s].iov_len;
			assert(sw_ch->stream.avail_in > 0);
		}

		if (remaining <= last_seglen) {
			/* Need to set end of stream on last block */
			sw_ch->stream.end_of_stream = 1;
		}

		rc = isal_deflate(&sw_ch->stream);
		if (rc) {
			SPDK_ERRLOG("isal_deflate returned error %d.\n", rc);
		}

		if (remaining > 0) {
			assert(siov[s].iov_len > sw_ch->stream.avail_in);
			remaining -= (siov[s].iov_len - sw_ch->stream.avail_in);
		}

	} while (remaining > 0 || sw_ch->stream.avail_out == 0);
	assert(sw_ch->stream.avail_in == 0);

	/* Get our total output size */
	if (accel_task->output_size != NULL) {
		assert(sw_ch->stream.total_out > 0);
		*accel_task->output_size = sw_ch->stream.total_out;
	}

	return rc;
#else
	SPDK_ERRLOG("ISAL option is required to use software compression.\n");
	return -EINVAL;
#endif
}

static int
_sw_accel_decompress(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
#ifdef SPDK_CONFIG_ISAL
	struct iovec *siov = accel_task->s.iovs;
	struct iovec *diov = accel_task->d.iovs;
	uint32_t s = 0, d = 0;
	int rc = 0;

	isal_inflate_reset(&sw_ch->state);
	sw_ch->state.next_out = diov[d].iov_base;
	sw_ch->state.avail_out = diov[d].iov_len;
	sw_ch->state.next_in = siov[s].iov_base;
	sw_ch->state.avail_in = siov[s].iov_len;

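	/*
	 * Feed isal_inflate() the same way as compression above, advancing the
	 * dst/src iovecs as ISA-L drains them, until the decoder reports that
	 * the final block has been produced (ISAL_BLOCK_FINISH).
	 */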
	do {
		/* if isal has exhausted the current dst iovec, move to the next
		 * one if there is one */
		if (sw_ch->state.avail_out == 0 && ((d + 1) < accel_task->d.iovcnt)) {
			d++;
			sw_ch->state.next_out = diov[d].iov_base;
			sw_ch->state.avail_out = diov[d].iov_len;
			assert(sw_ch->state.avail_out > 0);
		}

		/* if isal has exhausted the current src iovec, move to the next
		 * one if there is one */
		if (sw_ch->state.avail_in == 0 && ((s + 1) < accel_task->s.iovcnt)) {
			s++;
			sw_ch->state.next_in = siov[s].iov_base;
			sw_ch->state.avail_in = siov[s].iov_len;
			assert(sw_ch->state.avail_in > 0);
		}

		rc = isal_inflate(&sw_ch->state);
		if (rc) {
			SPDK_ERRLOG("isal_inflate returned error %d.\n", rc);
		}

	} while (sw_ch->state.block_state < ISAL_BLOCK_FINISH);
	assert(sw_ch->state.avail_in == 0);

	/* Get our total output size */
	if (accel_task->output_size != NULL) {
		assert(sw_ch->state.total_out > 0);
		*accel_task->output_size = sw_ch->state.total_out;
	}

	return rc;
#else
	SPDK_ERRLOG("ISAL option is required to use software decompression.\n");
	return -EINVAL;
#endif
}

static int
_sw_accel_crypto_operation(struct spdk_accel_task *accel_task, struct spdk_accel_crypto_key *key,
			   sw_accel_crypto_op op)
{
#ifdef SPDK_CONFIG_ISAL_CRYPTO
	uint64_t iv[2];
	size_t remaining_len, dst_len;
	uint64_t src_offset = 0, dst_offset = 0;
	uint32_t src_iovpos = 0, dst_iovpos = 0, src_iovcnt, dst_iovcnt;
	uint32_t i, block_size, crypto_len, crypto_accum_len = 0;
	struct iovec *src_iov, *dst_iov;
	uint8_t *src, *dst;

	/* The IV is 128 bits; since we use the 64-bit logical block address as
	 * the IV, fill the first 8 bytes with zeroes. */
	iv[0] = 0;
	iv[1] = accel_task->iv;
	src_iov = accel_task->s.iovs;
	src_iovcnt = accel_task->s.iovcnt;
	if (accel_task->d.iovcnt) {
		dst_iov = accel_task->d.iovs;
		dst_iovcnt = accel_task->d.iovcnt;
	} else {
		/* in-place operation */
		dst_iov = accel_task->s.iovs;
		dst_iovcnt = accel_task->s.iovcnt;
	}
	block_size = accel_task->block_size;

	if (!src_iovcnt || !dst_iovcnt || !block_size || !op) {
		SPDK_ERRLOG("src_iovcnt %u, dst_iovcnt %u, block_size %u, op %p\n", src_iovcnt, dst_iovcnt,
			    block_size, op);
		return -EINVAL;
	}

	remaining_len = 0;
	for (i = 0; i < src_iovcnt; i++) {
		remaining_len += src_iov[i].iov_len;
	}
	dst_len = 0;
	for (i = 0; i < dst_iovcnt; i++) {
		dst_len += dst_iov[i].iov_len;
	}

	if (spdk_unlikely(remaining_len != dst_len || !remaining_len)) {
		return -ERANGE;
	}
	if (spdk_unlikely(remaining_len % accel_task->block_size != 0)) {
		return -EINVAL;
	}

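	/*
	 * Walk both scatter-gather lists in logical-block-sized units. Each
	 * pass processes the largest run that stays within the current source
	 * iovec, the current destination iovec, and the current logical block;
	 * the tweak (iv) is advanced each time a full block has been consumed.
	 */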
	while (remaining_len) {
		crypto_len = spdk_min(block_size - crypto_accum_len, src_iov->iov_len - src_offset);
		crypto_len = spdk_min(crypto_len, dst_iov->iov_len - dst_offset);
		src = (uint8_t *)src_iov->iov_base + src_offset;
		dst = (uint8_t *)dst_iov->iov_base + dst_offset;

		op((uint8_t *)key->key2, (uint8_t *)key->key, (uint8_t *)iv, crypto_len, src, dst);

		src_offset += crypto_len;
		dst_offset += crypto_len;
		crypto_accum_len += crypto_len;
		remaining_len -= crypto_len;

		if (crypto_accum_len == block_size) {
			/* We may process only part of a logical block per pass; once the
			 * whole block has been processed, increment the iv. */
			crypto_accum_len = 0;
			iv[1]++;
		}
		if (src_offset == src_iov->iov_len) {
			src_iov++;
			src_iovpos++;
			src_offset = 0;
		}
		if (src_iovpos == src_iovcnt) {
			break;
		}
		if (dst_offset == dst_iov->iov_len) {
			dst_iov++;
			dst_iovpos++;
			dst_offset = 0;
		}
		if (dst_iovpos == dst_iovcnt) {
			break;
		}
	}

	if (remaining_len) {
		SPDK_ERRLOG("remaining len %zu\n", remaining_len);
		return -EINVAL;
	}

	return 0;
#else
	return -ENOTSUP;
#endif
}

static int
_sw_accel_encrypt(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	struct spdk_accel_crypto_key *key;
	struct sw_accel_crypto_key_data *key_data;

	key = accel_task->crypto_key;
	if (spdk_unlikely(key->module_if != &g_sw_module || !key->priv)) {
		return -EINVAL;
	}
	if (spdk_unlikely(accel_task->block_size > ACCEL_AES_XTS_MAX_BLOCK_SIZE)) {
		SPDK_WARNLOG("Block size for AES_XTS is limited to %u, current size %u\n",
			     ACCEL_AES_XTS_MAX_BLOCK_SIZE, accel_task->block_size);
		return -ERANGE;
	}
	key_data = key->priv;
	return _sw_accel_crypto_operation(accel_task, key, key_data->encrypt);
}

static int
_sw_accel_decrypt(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	struct spdk_accel_crypto_key *key;
	struct sw_accel_crypto_key_data *key_data;

	key = accel_task->crypto_key;
	if (spdk_unlikely(key->module_if != &g_sw_module || !key->priv)) {
		return -EINVAL;
	}
	if (spdk_unlikely(accel_task->block_size > ACCEL_AES_XTS_MAX_BLOCK_SIZE)) {
		SPDK_WARNLOG("Block size for AES_XTS is limited to %u, current size %u\n",
			     ACCEL_AES_XTS_MAX_BLOCK_SIZE, accel_task->block_size);
		return -ERANGE;
	}
	key_data = key->priv;
	return _sw_accel_crypto_operation(accel_task, key, key_data->decrypt);
}

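/*
 * XOR generation for RAID-style parity: spdk_xor_gen() XORs nsrcs.cnt source
 * buffers of equal length into the destination buffer. The software path
 * assumes a single destination iovec.
 */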
static int
_sw_accel_xor(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	return spdk_xor_gen(accel_task->d.iovs[0].iov_base,
			    accel_task->nsrcs.srcs,
			    accel_task->nsrcs.cnt,
			    accel_task->d.iovs[0].iov_len);
}

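/*
 * Tasks may arrive as a chain linked through their `link` TAILQ entry;
 * execute each one synchronously and queue it on the channel's completion
 * list for the poller to drain. Per-operation return codes are carried in
 * task->status rather than failing the submission itself.
 */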
static int
sw_accel_submit_tasks(struct spdk_io_channel *ch, struct spdk_accel_task *accel_task)
{
	struct sw_accel_io_channel *sw_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *tmp;
	int rc = 0;

	do {
		switch (accel_task->op_code) {
		case ACCEL_OPC_COPY:
			_sw_accel_copy_iovs(accel_task->d.iovs, accel_task->d.iovcnt,
					    accel_task->s.iovs, accel_task->s.iovcnt);
			break;
		case ACCEL_OPC_FILL:
			rc = _sw_accel_fill(accel_task->d.iovs, accel_task->d.iovcnt,
					    accel_task->fill_pattern);
			break;
		case ACCEL_OPC_DUALCAST:
			rc = _sw_accel_dualcast_iovs(accel_task->d.iovs, accel_task->d.iovcnt,
						     accel_task->d2.iovs, accel_task->d2.iovcnt,
						     accel_task->s.iovs, accel_task->s.iovcnt);
			break;
		case ACCEL_OPC_COMPARE:
			rc = _sw_accel_compare(accel_task->s.iovs, accel_task->s.iovcnt,
					       accel_task->s2.iovs, accel_task->s2.iovcnt);
			break;
		case ACCEL_OPC_CRC32C:
			_sw_accel_crc32cv(accel_task->crc_dst, accel_task->s.iovs,
					  accel_task->s.iovcnt, accel_task->seed);
			break;
		case ACCEL_OPC_COPY_CRC32C:
			_sw_accel_copy_iovs(accel_task->d.iovs, accel_task->d.iovcnt,
					    accel_task->s.iovs, accel_task->s.iovcnt);
			_sw_accel_crc32cv(accel_task->crc_dst, accel_task->s.iovs,
					  accel_task->s.iovcnt, accel_task->seed);
			break;
		case ACCEL_OPC_COMPRESS:
			rc = _sw_accel_compress(sw_ch, accel_task);
			break;
		case ACCEL_OPC_DECOMPRESS:
			rc = _sw_accel_decompress(sw_ch, accel_task);
			break;
		case ACCEL_OPC_XOR:
			rc = _sw_accel_xor(sw_ch, accel_task);
			break;
		case ACCEL_OPC_ENCRYPT:
			rc = _sw_accel_encrypt(sw_ch, accel_task);
			break;
		case ACCEL_OPC_DECRYPT:
			rc = _sw_accel_decrypt(sw_ch, accel_task);
			break;
		default:
			assert(false);
			break;
		}

		tmp = TAILQ_NEXT(accel_task, link);

		_add_to_comp_list(sw_ch, accel_task, rc);

		accel_task = tmp;
	} while (accel_task);

	return 0;
}

static struct spdk_io_channel *sw_accel_get_io_channel(void);
static int sw_accel_module_init(void);
static void sw_accel_module_fini(void *ctxt);
static size_t sw_accel_module_get_ctx_size(void);

static struct spdk_accel_module_if g_sw_module = {
	.module_init		= sw_accel_module_init,
	.module_fini		= sw_accel_module_fini,
	.write_config_json	= NULL,
	.get_ctx_size		= sw_accel_module_get_ctx_size,
	.name			= "software",
	.supports_opcode	= sw_accel_supports_opcode,
	.get_io_channel		= sw_accel_get_io_channel,
	.submit_tasks		= sw_accel_submit_tasks,
	.crypto_key_init	= sw_accel_crypto_key_init,
	.crypto_key_deinit	= sw_accel_crypto_key_deinit,
};

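/*
 * Swap the pending completions onto a local list before completing them, so
 * callbacks that submit new tasks append to the (now empty) channel list and
 * are handled on the next poll. This bounds the work done per poller
 * iteration and avoids mutating the list being walked.
 */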
static int
accel_comp_poll(void *arg)
{
	struct sw_accel_io_channel	*sw_ch = arg;
	TAILQ_HEAD(, spdk_accel_task)	tasks_to_complete;
	struct spdk_accel_task		*accel_task;

	if (TAILQ_EMPTY(&sw_ch->tasks_to_complete)) {
		return SPDK_POLLER_IDLE;
	}

	TAILQ_INIT(&tasks_to_complete);
	TAILQ_SWAP(&tasks_to_complete, &sw_ch->tasks_to_complete, spdk_accel_task, link);

	while ((accel_task = TAILQ_FIRST(&tasks_to_complete))) {
		TAILQ_REMOVE(&tasks_to_complete, accel_task, link);
		spdk_accel_task_complete(accel_task, accel_task->status);
	}

	return SPDK_POLLER_BUSY;
}

static int
sw_accel_create_cb(void *io_device, void *ctx_buf)
{
	struct sw_accel_io_channel *sw_ch = ctx_buf;

	TAILQ_INIT(&sw_ch->tasks_to_complete);
	sw_ch->completion_poller = SPDK_POLLER_REGISTER(accel_comp_poll, sw_ch, 0);

#ifdef SPDK_CONFIG_ISAL
	isal_deflate_init(&sw_ch->stream);
	sw_ch->stream.flush = NO_FLUSH;
	sw_ch->stream.level = 1;
	sw_ch->stream.level_buf = calloc(1, ISAL_DEF_LVL1_DEFAULT);
	if (sw_ch->stream.level_buf == NULL) {
		SPDK_ERRLOG("Could not allocate isal internal buffer\n");
		/* destroy_cb is not invoked when create fails, so release the
		 * poller here to avoid leaking it. */
		spdk_poller_unregister(&sw_ch->completion_poller);
		return -ENOMEM;
	}
	sw_ch->stream.level_buf_size = ISAL_DEF_LVL1_DEFAULT;
	isal_inflate_init(&sw_ch->state);
#endif

	return 0;
}

static void
sw_accel_destroy_cb(void *io_device, void *ctx_buf)
{
	struct sw_accel_io_channel *sw_ch = ctx_buf;

#ifdef SPDK_CONFIG_ISAL
	free(sw_ch->stream.level_buf);
#endif

	spdk_poller_unregister(&sw_ch->completion_poller);
}

static struct spdk_io_channel *
sw_accel_get_io_channel(void)
{
	return spdk_get_io_channel(&g_sw_module);
}

static size_t
sw_accel_module_get_ctx_size(void)
{
	return sizeof(struct spdk_accel_task);
}

static int
sw_accel_module_init(void)
{
	SPDK_NOTICELOG("Accel framework software module initialized.\n");
	spdk_io_device_register(&g_sw_module, sw_accel_create_cb, sw_accel_destroy_cb,
				sizeof(struct sw_accel_io_channel), "sw_accel_module");

	return 0;
}

static void
sw_accel_module_fini(void *ctxt)
{
	spdk_io_device_unregister(&g_sw_module, NULL);
	spdk_accel_module_finish();
}

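/*
 * Resolve the ISA-L AES-XTS entry points for this key. AES-XTS uses two keys
 * of equal size: 16 bytes each for AES_XTS_128 or 32 bytes each for
 * AES_XTS_256, so key_size and key2_size must match one of those values.
 */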
static int
sw_accel_create_aes_xts(struct spdk_accel_crypto_key *key)
{
#ifdef SPDK_CONFIG_ISAL_CRYPTO
	struct sw_accel_crypto_key_data *key_data;

	if (!key->key || !key->key2) {
		SPDK_ERRLOG("key or key2 is missing\n");
		return -EINVAL;
	}

	if (!key->key_size || key->key_size != key->key2_size) {
		SPDK_ERRLOG("key size %zu is not equal to key2 size %zu or is 0\n", key->key_size,
			    key->key2_size);
		return -EINVAL;
	}

	key_data = calloc(1, sizeof(*key_data));
	if (!key_data) {
		return -ENOMEM;
	}

	switch (key->key_size) {
	case ACCEL_AES_XTS_128_KEY_SIZE:
		key_data->encrypt = XTS_AES_128_enc;
		key_data->decrypt = XTS_AES_128_dec;
		break;
	case ACCEL_AES_XTS_256_KEY_SIZE:
		key_data->encrypt = XTS_AES_256_enc;
		key_data->decrypt = XTS_AES_256_dec;
		break;
	default:
		SPDK_ERRLOG("Incorrect key size %zu, should be %d for AES_XTS_128 or %d for AES_XTS_256\n",
			    key->key_size, ACCEL_AES_XTS_128_KEY_SIZE, ACCEL_AES_XTS_256_KEY_SIZE);
		free(key_data);
		return -EINVAL;
	}

	key->priv = key_data;

	return 0;
#else
	return -ENOTSUP;
#endif
}

static int
sw_accel_crypto_key_init(struct spdk_accel_crypto_key *key)
{
	if (!key || !key->param.cipher) {
		return -EINVAL;
	}
	if (strcmp(key->param.cipher, ACCEL_AES_XTS) == 0) {
		return sw_accel_create_aes_xts(key);
	} else {
		SPDK_ERRLOG("Only %s cipher is supported\n", ACCEL_AES_XTS);
		return -EINVAL;
	}
}

static void
sw_accel_crypto_key_deinit(struct spdk_accel_crypto_key *key)
{
	if (!key || key->module_if != &g_sw_module || !key->priv) {
		return;
	}

	free(key->priv);
}

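/*
 * Registers the module with the accel framework at load time; the macro
 * expands to a constructor that adds g_sw_module to the global module list.
 */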
SPDK_ACCEL_MODULE_REGISTER(sw, &g_sw_module)