/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2022 Intel Corporation.
 *   Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES
 *   All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/accel_module.h"
#include "accel_internal.h"

#include "spdk/env.h"
#include "spdk/likely.h"
#include "spdk/log.h"
#include "spdk/thread.h"
#include "spdk/json.h"
#include "spdk/crc32.h"
#include "spdk/util.h"
#include "spdk/xor.h"

#ifdef SPDK_CONFIG_ISAL
#include "../isa-l/include/igzip_lib.h"
#ifdef SPDK_CONFIG_ISAL_CRYPTO
#include "../isa-l-crypto/include/aes_xts.h"
#endif
#endif

#define ACCEL_AES_XTS_128_KEY_SIZE 16
#define ACCEL_AES_XTS_256_KEY_SIZE 32

/* Per the AES-XTS spec, a data unit cannot exceed 2^20 blocks of 128 bits (16 bytes)
 * each, i.e. 2^24 bytes. */
#define ACCEL_AES_XTS_MAX_BLOCK_SIZE (1 << 24)

struct sw_accel_io_channel {
	/* for ISAL */
#ifdef SPDK_CONFIG_ISAL
	struct isal_zstream		stream;
	struct inflate_state		state;
#endif
	struct spdk_poller		*completion_poller;
	TAILQ_HEAD(, spdk_accel_task)	tasks_to_complete;
};

typedef void (*sw_accel_crypto_op)(uint8_t *k2, uint8_t *k1, uint8_t *tweak, uint64_t lba_size,
				   const uint8_t *src, uint8_t *dst);

struct sw_accel_crypto_key_data {
	sw_accel_crypto_op encrypt;
	sw_accel_crypto_op decrypt;
};

static struct spdk_accel_module_if g_sw_module;

static void sw_accel_crypto_key_deinit(struct spdk_accel_crypto_key *key);
static int sw_accel_crypto_key_init(struct spdk_accel_crypto_key *key);
static bool sw_accel_crypto_supports_tweak_mode(enum spdk_accel_crypto_tweak_mode tweak_mode);

/* Queue software completions on a per-channel list and complete them from a poller;
 * completing on the caller's stack risks unbounded recursion, since the completion
 * callback will likely submit another task. */
static inline void
_add_to_comp_list(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task, int status)
{
	accel_task->status = status;
	TAILQ_INSERT_TAIL(&sw_ch->tasks_to_complete, accel_task, link);
}

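/* Report which opcodes the software module implements; the accel framework
 * uses this to route operations to a module. */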
static bool
sw_accel_supports_opcode(enum accel_opcode opc)
{
	switch (opc) {
	case ACCEL_OPC_COPY:
	case ACCEL_OPC_FILL:
	case ACCEL_OPC_DUALCAST:
	case ACCEL_OPC_COMPARE:
	case ACCEL_OPC_CRC32C:
	case ACCEL_OPC_COPY_CRC32C:
	case ACCEL_OPC_COMPRESS:
	case ACCEL_OPC_DECOMPRESS:
	case ACCEL_OPC_ENCRYPT:
	case ACCEL_OPC_DECRYPT:
	case ACCEL_OPC_XOR:
		return true;
	default:
		return false;
	}
}

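/* Dualcast: copy one source buffer to two destination buffers. Only
 * single-element iovecs of matching length are supported. */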
static int
_sw_accel_dualcast_iovs(struct iovec *dst_iovs, uint32_t dst_iovcnt,
			struct iovec *dst2_iovs, uint32_t dst2_iovcnt,
			struct iovec *src_iovs, uint32_t src_iovcnt)
{
	if (spdk_unlikely(dst_iovcnt != 1 || dst2_iovcnt != 1 || src_iovcnt != 1)) {
		return -EINVAL;
	}

	if (spdk_unlikely(dst_iovs[0].iov_len != src_iovs[0].iov_len ||
			  dst_iovs[0].iov_len != dst2_iovs[0].iov_len)) {
		return -EINVAL;
	}

	memcpy(dst_iovs[0].iov_base, src_iovs[0].iov_base, dst_iovs[0].iov_len);
	memcpy(dst2_iovs[0].iov_base, src_iovs[0].iov_base, dst_iovs[0].iov_len);

	return 0;
}

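/* Copy between arbitrary iovec lists; spdk_ioviter yields the largest
 * contiguous chunk shared by the current source and destination elements
 * on each iteration. */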
static void
_sw_accel_copy_iovs(struct iovec *dst_iovs, uint32_t dst_iovcnt,
		    struct iovec *src_iovs, uint32_t src_iovcnt)
{
	struct spdk_ioviter iter;
	void *src, *dst;
	size_t len;

	for (len = spdk_ioviter_first(&iter, src_iovs, src_iovcnt,
				      dst_iovs, dst_iovcnt, &src, &dst);
	     len != 0;
	     len = spdk_ioviter_next(&iter, &src, &dst)) {
		memcpy(dst, src, len);
	}
}

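/* Compare two single-element iovecs of equal length. Returns the raw
 * memcmp() result, so any non-zero value indicates a miscompare. */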
static int
_sw_accel_compare(struct iovec *src_iovs, uint32_t src_iovcnt,
		  struct iovec *src2_iovs, uint32_t src2_iovcnt)
{
	if (spdk_unlikely(src_iovcnt != 1 || src2_iovcnt != 1)) {
		return -EINVAL;
	}

	if (spdk_unlikely(src_iovs[0].iov_len != src2_iovs[0].iov_len)) {
		return -EINVAL;
	}

	return memcmp(src_iovs[0].iov_base, src2_iovs[0].iov_base, src_iovs[0].iov_len);
}

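/* Fill a single destination iovec with a repeated one-byte pattern. */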
static int
_sw_accel_fill(struct iovec *iovs, uint32_t iovcnt, uint8_t fill)
{
	void *dst;
	size_t nbytes;

	if (spdk_unlikely(iovcnt != 1)) {
		return -EINVAL;
	}

	dst = iovs[0].iov_base;
	nbytes = iovs[0].iov_len;

	memset(dst, fill, nbytes);

	return 0;
}

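/* CRC32C over an iovec list. The seed is bitwise-complemented before the
 * update call, which appears to match the pre-inverted initial-value
 * convention of the CRC32C helpers (a seed of 0 becomes the customary
 * 0xFFFFFFFF starting value). */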
static void
_sw_accel_crc32cv(uint32_t *crc_dst, struct iovec *iov, uint32_t iovcnt, uint32_t seed)
{
	*crc_dst = spdk_crc32c_iov_update(iov, iovcnt, ~seed);
}

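/* Streaming compression via ISA-L deflate. Source and destination iovecs are
 * consumed in tandem; end_of_stream is set once the remaining input fits in
 * the last source segment, and exhausting the destination before ISA-L
 * reaches ZSTATE_END is reported as -ENOMEM. */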
static int
_sw_accel_compress(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
#ifdef SPDK_CONFIG_ISAL
	size_t last_seglen = accel_task->s.iovs[accel_task->s.iovcnt - 1].iov_len;
	struct iovec *siov = accel_task->s.iovs;
	struct iovec *diov = accel_task->d.iovs;
	size_t remaining;
	uint32_t i, s = 0, d = 0;
	int rc = 0;

	remaining = 0;
	for (i = 0; i < accel_task->s.iovcnt; ++i) {
		remaining += accel_task->s.iovs[i].iov_len;
	}

	isal_deflate_reset(&sw_ch->stream);
	sw_ch->stream.end_of_stream = 0;
	sw_ch->stream.next_out = diov[d].iov_base;
	sw_ch->stream.avail_out = diov[d].iov_len;
	sw_ch->stream.next_in = siov[s].iov_base;
	sw_ch->stream.avail_in = siov[s].iov_len;

	do {
		/* if isal has exhausted the current dst iovec, move to the next
		 * one if there is one */
		if (sw_ch->stream.avail_out == 0) {
			if (++d < accel_task->d.iovcnt) {
				sw_ch->stream.next_out = diov[d].iov_base;
				sw_ch->stream.avail_out = diov[d].iov_len;
				assert(sw_ch->stream.avail_out > 0);
			} else {
				/* we have no avail_out but also no more iovecs left so this is
				 * the case where either the output buffer was a perfect fit
				 * or not enough was provided.  Check the ISAL state to determine
				 * which. */
				if (sw_ch->stream.internal_state.state != ZSTATE_END) {
					SPDK_ERRLOG("Not enough destination buffer provided.\n");
					rc = -ENOMEM;
				}
				break;
			}
		}

		/* if isal has exhausted the current src iovec, move to the next
		 * one if there is one */
		if (sw_ch->stream.avail_in == 0 && ((s + 1) < accel_task->s.iovcnt)) {
			s++;
			sw_ch->stream.next_in = siov[s].iov_base;
			sw_ch->stream.avail_in = siov[s].iov_len;
			assert(sw_ch->stream.avail_in > 0);
		}

		if (remaining <= last_seglen) {
			/* Need to set end of stream on last block */
			sw_ch->stream.end_of_stream = 1;
		}

		rc = isal_deflate(&sw_ch->stream);
		if (rc) {
			SPDK_ERRLOG("isal_deflate returned error %d.\n", rc);
		}

		if (remaining > 0) {
			assert(siov[s].iov_len > sw_ch->stream.avail_in);
			remaining -= (siov[s].iov_len - sw_ch->stream.avail_in);
		}

	} while (remaining > 0 || sw_ch->stream.avail_out == 0);
	assert(sw_ch->stream.avail_in == 0);

	/* Get our total output size */
	if (accel_task->output_size != NULL) {
		assert(sw_ch->stream.total_out > 0);
		*accel_task->output_size = sw_ch->stream.total_out;
	}

	return rc;
#else
	SPDK_ERRLOG("ISAL option is required to use software compression.\n");
	return -EINVAL;
#endif
}

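/* Streaming decompression via ISA-L inflate, advancing through the source and
 * destination iovecs until ISA-L signals the final block is complete. */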
static int
_sw_accel_decompress(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
#ifdef SPDK_CONFIG_ISAL
	struct iovec *siov = accel_task->s.iovs;
	struct iovec *diov = accel_task->d.iovs;
	uint32_t s = 0, d = 0;
	int rc = 0;

	isal_inflate_reset(&sw_ch->state);
	sw_ch->state.next_out = diov[d].iov_base;
	sw_ch->state.avail_out = diov[d].iov_len;
	sw_ch->state.next_in = siov[s].iov_base;
	sw_ch->state.avail_in = siov[s].iov_len;

	do {
		/* if isal has exhausted the current dst iovec, move to the next
		 * one if there is one */
		if (sw_ch->state.avail_out == 0 && ((d + 1) < accel_task->d.iovcnt)) {
			d++;
			sw_ch->state.next_out = diov[d].iov_base;
			sw_ch->state.avail_out = diov[d].iov_len;
			assert(sw_ch->state.avail_out > 0);
		}

		/* if isal has exhausted the current src iovec, move to the next
		 * one if there is one */
		if (sw_ch->state.avail_in == 0 && ((s + 1) < accel_task->s.iovcnt)) {
			s++;
			sw_ch->state.next_in = siov[s].iov_base;
			sw_ch->state.avail_in = siov[s].iov_len;
			assert(sw_ch->state.avail_in > 0);
		}

		rc = isal_inflate(&sw_ch->state);
		if (rc) {
			SPDK_ERRLOG("isal_inflate returned error %d.\n", rc);
		}

	} while (sw_ch->state.block_state < ISAL_BLOCK_FINISH);
	assert(sw_ch->state.avail_in == 0);

	/* Get our total output size */
	if (accel_task->output_size != NULL) {
		assert(sw_ch->state.total_out > 0);
		*accel_task->output_size = sw_ch->state.total_out;
	}

	return rc;
#else
	SPDK_ERRLOG("ISAL option is required to use software decompression.\n");
	return -EINVAL;
#endif
}

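/* Core AES-XTS worker shared by encrypt and decrypt. Data is processed in
 * chunks bounded by the logical block size and the current source/destination
 * iovec segments; the tweak is incremented after each full logical block. */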
static int
_sw_accel_crypto_operation(struct spdk_accel_task *accel_task, struct spdk_accel_crypto_key *key,
			   sw_accel_crypto_op op)
{
#ifdef SPDK_CONFIG_ISAL_CRYPTO
	uint64_t iv[2];
	size_t remaining_len, dst_len;
	uint64_t src_offset = 0, dst_offset = 0;
	uint32_t src_iovpos = 0, dst_iovpos = 0, src_iovcnt, dst_iovcnt;
	uint32_t i, block_size, crypto_len, crypto_accum_len = 0;
	struct iovec *src_iov, *dst_iov;
	uint8_t *src, *dst;

	/* The iv is 128 bits; since we use the 64-bit logical block address as the iv,
	 * the first 8 bytes are filled with zeroes */
	iv[0] = 0;
	iv[1] = accel_task->iv;
	src_iov = accel_task->s.iovs;
	src_iovcnt = accel_task->s.iovcnt;
	if (accel_task->d.iovcnt) {
		dst_iov = accel_task->d.iovs;
		dst_iovcnt = accel_task->d.iovcnt;
	} else {
		/* in-place operation */
		dst_iov = accel_task->s.iovs;
		dst_iovcnt = accel_task->s.iovcnt;
	}
	block_size = accel_task->block_size;

	if (!src_iovcnt || !dst_iovcnt || !block_size || !op) {
		SPDK_ERRLOG("src_iovcnt %u, dst_iovcnt %u, block_size %u, op %p\n", src_iovcnt, dst_iovcnt,
			    block_size, op);
		return -EINVAL;
	}

	remaining_len = 0;
	for (i = 0; i < src_iovcnt; i++) {
		remaining_len += src_iov[i].iov_len;
	}
	dst_len = 0;
	for (i = 0; i < dst_iovcnt; i++) {
		dst_len += dst_iov[i].iov_len;
	}

	if (spdk_unlikely(remaining_len != dst_len || !remaining_len)) {
		return -ERANGE;
	}
	if (spdk_unlikely(remaining_len % accel_task->block_size != 0)) {
		return -EINVAL;
	}

	while (remaining_len) {
		crypto_len = spdk_min(block_size - crypto_accum_len, src_iov->iov_len - src_offset);
		crypto_len = spdk_min(crypto_len, dst_iov->iov_len - dst_offset);
		src = (uint8_t *)src_iov->iov_base + src_offset;
		dst = (uint8_t *)dst_iov->iov_base + dst_offset;

		op((uint8_t *)key->key2, (uint8_t *)key->key, (uint8_t *)iv, crypto_len, src, dst);

		src_offset += crypto_len;
		dst_offset += crypto_len;
		crypto_accum_len += crypto_len;
		remaining_len -= crypto_len;

		if (crypto_accum_len == block_size) {
			/* We may process only part of a logical block at a time; once the
			 * whole block has been processed, increment the iv */
			crypto_accum_len = 0;
			iv[1]++;
		}
		if (src_offset == src_iov->iov_len) {
			src_iov++;
			src_iovpos++;
			src_offset = 0;
		}
		if (src_iovpos == src_iovcnt) {
			break;
		}
		if (dst_offset == dst_iov->iov_len) {
			dst_iov++;
			dst_iovpos++;
			dst_offset = 0;
		}
		if (dst_iovpos == dst_iovcnt) {
			break;
		}
	}

	if (remaining_len) {
		SPDK_ERRLOG("remaining len %zu\n", remaining_len);
		return -EINVAL;
	}

	return 0;
#else
	return -ENOTSUP;
#endif
}

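/* Validate that the key belongs to this module and that the AES-XTS data unit
 * size limit is honored, then dispatch to the key's encrypt or decrypt routine. */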
static int
_sw_accel_encrypt(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	struct spdk_accel_crypto_key *key;
	struct sw_accel_crypto_key_data *key_data;

	key = accel_task->crypto_key;
	if (spdk_unlikely(key->module_if != &g_sw_module || !key->priv)) {
		return -EINVAL;
	}
	if (spdk_unlikely(accel_task->block_size > ACCEL_AES_XTS_MAX_BLOCK_SIZE)) {
		SPDK_WARNLOG("Max block size for AES_XTS is limited to %u, current size %u\n",
			     ACCEL_AES_XTS_MAX_BLOCK_SIZE, accel_task->block_size);
		return -ERANGE;
	}
	key_data = key->priv;
	return _sw_accel_crypto_operation(accel_task, key, key_data->encrypt);
}

static int
_sw_accel_decrypt(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	struct spdk_accel_crypto_key *key;
	struct sw_accel_crypto_key_data *key_data;

	key = accel_task->crypto_key;
	if (spdk_unlikely(key->module_if != &g_sw_module || !key->priv)) {
		return -EINVAL;
	}
	if (spdk_unlikely(accel_task->block_size > ACCEL_AES_XTS_MAX_BLOCK_SIZE)) {
		SPDK_WARNLOG("Max block size for AES_XTS is limited to %u, current size %u\n",
			     ACCEL_AES_XTS_MAX_BLOCK_SIZE, accel_task->block_size);
		return -ERANGE;
	}
	key_data = key->priv;
	return _sw_accel_crypto_operation(accel_task, key, key_data->decrypt);
}

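/* XOR the N source buffers into the single destination buffer. */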
static int
_sw_accel_xor(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	return spdk_xor_gen(accel_task->d.iovs[0].iov_base,
			    accel_task->nsrcs.srcs,
			    accel_task->nsrcs.cnt,
			    accel_task->d.iovs[0].iov_len);
}

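/* Execute a chain of linked tasks synchronously, queueing each finished task
 * on the channel's completion list for the poller to complete. */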
static int
sw_accel_submit_tasks(struct spdk_io_channel *ch, struct spdk_accel_task *accel_task)
{
	struct sw_accel_io_channel *sw_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *tmp;
	int rc = 0;

	do {
		switch (accel_task->op_code) {
		case ACCEL_OPC_COPY:
			_sw_accel_copy_iovs(accel_task->d.iovs, accel_task->d.iovcnt,
					    accel_task->s.iovs, accel_task->s.iovcnt);
			break;
		case ACCEL_OPC_FILL:
			rc = _sw_accel_fill(accel_task->d.iovs, accel_task->d.iovcnt,
					    accel_task->fill_pattern);
			break;
		case ACCEL_OPC_DUALCAST:
			rc = _sw_accel_dualcast_iovs(accel_task->d.iovs, accel_task->d.iovcnt,
						     accel_task->d2.iovs, accel_task->d2.iovcnt,
						     accel_task->s.iovs, accel_task->s.iovcnt);
			break;
		case ACCEL_OPC_COMPARE:
			rc = _sw_accel_compare(accel_task->s.iovs, accel_task->s.iovcnt,
					       accel_task->s2.iovs, accel_task->s2.iovcnt);
			break;
		case ACCEL_OPC_CRC32C:
			_sw_accel_crc32cv(accel_task->crc_dst, accel_task->s.iovs,
					  accel_task->s.iovcnt, accel_task->seed);
			break;
		case ACCEL_OPC_COPY_CRC32C:
			_sw_accel_copy_iovs(accel_task->d.iovs, accel_task->d.iovcnt,
					    accel_task->s.iovs, accel_task->s.iovcnt);
			_sw_accel_crc32cv(accel_task->crc_dst, accel_task->s.iovs,
					  accel_task->s.iovcnt, accel_task->seed);
			break;
		case ACCEL_OPC_COMPRESS:
			rc = _sw_accel_compress(sw_ch, accel_task);
			break;
		case ACCEL_OPC_DECOMPRESS:
			rc = _sw_accel_decompress(sw_ch, accel_task);
			break;
		case ACCEL_OPC_XOR:
			rc = _sw_accel_xor(sw_ch, accel_task);
			break;
		case ACCEL_OPC_ENCRYPT:
			rc = _sw_accel_encrypt(sw_ch, accel_task);
			break;
		case ACCEL_OPC_DECRYPT:
			rc = _sw_accel_decrypt(sw_ch, accel_task);
			break;
		default:
			assert(false);
			break;
		}

		tmp = TAILQ_NEXT(accel_task, link);

		_add_to_comp_list(sw_ch, accel_task, rc);

		accel_task = tmp;
	} while (accel_task);

	return 0;
}

static struct spdk_io_channel *sw_accel_get_io_channel(void);
static int sw_accel_module_init(void);
static void sw_accel_module_fini(void *ctxt);
static size_t sw_accel_module_get_ctx_size(void);

static struct spdk_accel_module_if g_sw_module = {
	.module_init		= sw_accel_module_init,
	.module_fini		= sw_accel_module_fini,
	.write_config_json	= NULL,
	.get_ctx_size		= sw_accel_module_get_ctx_size,
	.name			= "software",
	.supports_opcode	= sw_accel_supports_opcode,
	.get_io_channel		= sw_accel_get_io_channel,
	.submit_tasks		= sw_accel_submit_tasks,
	.crypto_key_init	= sw_accel_crypto_key_init,
	.crypto_key_deinit	= sw_accel_crypto_key_deinit,
	.crypto_supports_tweak_mode	= sw_accel_crypto_supports_tweak_mode,
};

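/* Completion poller: swap the pending list onto the stack and drain it, so
 * tasks submitted from completion callbacks land on the now-empty channel
 * list and are handled on a subsequent poll. */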
static int
accel_comp_poll(void *arg)
{
	struct sw_accel_io_channel	*sw_ch = arg;
	TAILQ_HEAD(, spdk_accel_task)	tasks_to_complete;
	struct spdk_accel_task		*accel_task;

	if (TAILQ_EMPTY(&sw_ch->tasks_to_complete)) {
		return SPDK_POLLER_IDLE;
	}

	TAILQ_INIT(&tasks_to_complete);
	TAILQ_SWAP(&tasks_to_complete, &sw_ch->tasks_to_complete, spdk_accel_task, link);

	while ((accel_task = TAILQ_FIRST(&tasks_to_complete))) {
		TAILQ_REMOVE(&tasks_to_complete, accel_task, link);
		spdk_accel_task_complete(accel_task, accel_task->status);
	}

	return SPDK_POLLER_BUSY;
}

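/* Per-channel setup: completion list and poller, plus (when ISA-L is enabled)
 * a level-1 deflate stream with its working buffer and an inflate state. */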
static int
sw_accel_create_cb(void *io_device, void *ctx_buf)
{
	struct sw_accel_io_channel *sw_ch = ctx_buf;

	TAILQ_INIT(&sw_ch->tasks_to_complete);
	sw_ch->completion_poller = SPDK_POLLER_REGISTER(accel_comp_poll, sw_ch, 0);

#ifdef SPDK_CONFIG_ISAL
	isal_deflate_init(&sw_ch->stream);
	sw_ch->stream.flush = NO_FLUSH;
	sw_ch->stream.level = 1;
	sw_ch->stream.level_buf = calloc(1, ISAL_DEF_LVL1_DEFAULT);
	if (sw_ch->stream.level_buf == NULL) {
		SPDK_ERRLOG("Could not allocate isal internal buffer\n");
		/* Don't leak the poller registered above on the error path. */
		spdk_poller_unregister(&sw_ch->completion_poller);
		return -ENOMEM;
	}
	sw_ch->stream.level_buf_size = ISAL_DEF_LVL1_DEFAULT;
	isal_inflate_init(&sw_ch->state);
#endif

	return 0;
}

static void
sw_accel_destroy_cb(void *io_device, void *ctx_buf)
{
	struct sw_accel_io_channel *sw_ch = ctx_buf;

#ifdef SPDK_CONFIG_ISAL
	free(sw_ch->stream.level_buf);
#endif

	spdk_poller_unregister(&sw_ch->completion_poller);
}

static struct spdk_io_channel *
sw_accel_get_io_channel(void)
{
	return spdk_get_io_channel(&g_sw_module);
}

static size_t
sw_accel_module_get_ctx_size(void)
{
	return sizeof(struct spdk_accel_task);
}

static int
sw_accel_module_init(void)
{
	SPDK_NOTICELOG("Accel framework software module initialized.\n");
	spdk_io_device_register(&g_sw_module, sw_accel_create_cb, sw_accel_destroy_cb,
				sizeof(struct sw_accel_io_channel), "sw_accel_module");

	return 0;
}

static void
sw_accel_module_fini(void *ctxt)
{
	spdk_io_device_unregister(&g_sw_module, NULL);
	spdk_accel_module_finish();
}

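/* Pick the ISA-L AES-XTS 128/256 routines based on key size; XTS uses two
 * keys (key and key2), each key_size bytes. */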
static int
sw_accel_create_aes_xts(struct spdk_accel_crypto_key *key)
{
#ifdef SPDK_CONFIG_ISAL_CRYPTO
	struct sw_accel_crypto_key_data *key_data;

	key_data = calloc(1, sizeof(*key_data));
	if (!key_data) {
		return -ENOMEM;
	}

	switch (key->key_size) {
	case ACCEL_AES_XTS_128_KEY_SIZE:
		key_data->encrypt = XTS_AES_128_enc;
		key_data->decrypt = XTS_AES_128_dec;
		break;
	case ACCEL_AES_XTS_256_KEY_SIZE:
		key_data->encrypt = XTS_AES_256_enc;
		key_data->decrypt = XTS_AES_256_dec;
		break;
	default:
		SPDK_ERRLOG("Incorrect key size %zu, should be %d for AES_XTS_128 or %d for AES_XTS_256\n",
			    key->key_size, ACCEL_AES_XTS_128_KEY_SIZE, ACCEL_AES_XTS_256_KEY_SIZE);
		free(key_data);
		return -EINVAL;
	}

	key->priv = key_data;

	return 0;
#else
	return -ENOTSUP;
#endif
}

static int
sw_accel_crypto_key_init(struct spdk_accel_crypto_key *key)
{
	if (strcmp(key->param.cipher, ACCEL_AES_XTS) == 0) {
		return sw_accel_create_aes_xts(key);
	} else {
		SPDK_ERRLOG("Only %s cipher is supported\n", ACCEL_AES_XTS);
		return -EINVAL;
	}
}

static void
sw_accel_crypto_key_deinit(struct spdk_accel_crypto_key *key)
{
	if (!key || key->module_if != &g_sw_module || !key->priv) {
		return;
	}

	free(key->priv);
}

static bool
sw_accel_crypto_supports_tweak_mode(enum spdk_accel_crypto_tweak_mode tweak_mode)
{
	return tweak_mode == SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA;
}

SPDK_ACCEL_MODULE_REGISTER(sw, &g_sw_module)