/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2022 Intel Corporation.
 *   Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES
 *   All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/accel_module.h"
#include "accel_internal.h"

#include "spdk/env.h"
#include "spdk/likely.h"
#include "spdk/log.h"
#include "spdk/thread.h"
#include "spdk/json.h"
#include "spdk/crc32.h"
#include "spdk/util.h"
#include "spdk/xor.h"

#ifdef SPDK_CONFIG_ISAL
#include "../isa-l/include/igzip_lib.h"
#ifdef SPDK_CONFIG_ISAL_CRYPTO
#include "../isa-l-crypto/include/aes_xts.h"
#endif
#endif

/* Per the AES-XTS spec, a data unit may not exceed 2^20 blocks of 128 bits
 * (16 bytes) each, i.e. 2^20 * 16 = 2^24 bytes. */
#define ACCEL_AES_XTS_MAX_BLOCK_SIZE (1 << 24)

struct sw_accel_io_channel {
	/* for ISAL */
#ifdef SPDK_CONFIG_ISAL
	struct isal_zstream		stream;
	struct inflate_state		state;
#endif
	struct spdk_poller		*completion_poller;
	TAILQ_HEAD(, spdk_accel_task)	tasks_to_complete;
};

typedef void (*sw_accel_crypto_op)(uint8_t *k2, uint8_t *k1, uint8_t *tweak, uint64_t lba_size,
				   const uint8_t *src, uint8_t *dst);

struct sw_accel_crypto_key_data {
	sw_accel_crypto_op encrypt;
	sw_accel_crypto_op decrypt;
};

static struct spdk_accel_module_if g_sw_module;

static void sw_accel_crypto_key_deinit(struct spdk_accel_crypto_key *_key);
static int sw_accel_crypto_key_init(struct spdk_accel_crypto_key *key);
static bool sw_accel_crypto_supports_tweak_mode(enum spdk_accel_crypto_tweak_mode tweak_mode);
static bool sw_accel_crypto_supports_cipher(enum spdk_accel_cipher cipher, size_t key_size);

/* Post SW completions to a list and complete them from a poller rather than on
 * the caller's stack, since the completion callback will likely submit more work. */
inline static void
_add_to_comp_list(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task, int status)
{
	accel_task->status = status;
	TAILQ_INSERT_TAIL(&sw_ch->tasks_to_complete, accel_task, link);
}

static bool
sw_accel_supports_opcode(enum accel_opcode opc)
{
	switch (opc) {
	case ACCEL_OPC_COPY:
	case ACCEL_OPC_FILL:
	case ACCEL_OPC_DUALCAST:
	case ACCEL_OPC_COMPARE:
	case ACCEL_OPC_CRC32C:
	case ACCEL_OPC_COPY_CRC32C:
	case ACCEL_OPC_COMPRESS:
	case ACCEL_OPC_DECOMPRESS:
	case ACCEL_OPC_ENCRYPT:
	case ACCEL_OPC_DECRYPT:
	case ACCEL_OPC_XOR:
		return true;
	default:
		return false;
	}
}
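
/*
 * Usage sketch (hypothetical caller, not part of this file): the accel
 * framework is expected to probe each module with supports_opcode() before
 * routing an opcode to it, along the lines of:
 *
 *	if (module->supports_opcode(ACCEL_OPC_CRC32C)) {
 *		use module as the CRC32C backend;
 *	}
 *
 * The software module answers true for every opcode it implements above, so
 * it can serve as the fallback for all of them.
 */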

static int
_sw_accel_dualcast_iovs(struct iovec *dst_iovs, uint32_t dst_iovcnt,
			struct iovec *dst2_iovs, uint32_t dst2_iovcnt,
			struct iovec *src_iovs, uint32_t src_iovcnt)
{
	if (spdk_unlikely(dst_iovcnt != 1 || dst2_iovcnt != 1 || src_iovcnt != 1)) {
		return -EINVAL;
	}

	if (spdk_unlikely(dst_iovs[0].iov_len != src_iovs[0].iov_len ||
			  dst_iovs[0].iov_len != dst2_iovs[0].iov_len)) {
		return -EINVAL;
	}

	memcpy(dst_iovs[0].iov_base, src_iovs[0].iov_base, dst_iovs[0].iov_len);
	memcpy(dst2_iovs[0].iov_base, src_iovs[0].iov_base, dst_iovs[0].iov_len);

	return 0;
}
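
/*
 * spdk_ioviter walks a source and a destination scatter-gather list in
 * lockstep: each iteration yields the largest span that is contiguous in
 * both lists, so the two iovec arrays may be split differently as long as
 * they describe the same total length.
 */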
static void
_sw_accel_copy_iovs(struct iovec *dst_iovs, uint32_t dst_iovcnt,
		    struct iovec *src_iovs, uint32_t src_iovcnt)
{
	struct spdk_ioviter iter;
	void *src, *dst;
	size_t len;

	for (len = spdk_ioviter_first(&iter, src_iovs, src_iovcnt,
				      dst_iovs, dst_iovcnt, &src, &dst);
	     len != 0;
	     len = spdk_ioviter_next(&iter, &src, &dst)) {
		memcpy(dst, src, len);
	}
}

static int
_sw_accel_compare(struct iovec *src_iovs, uint32_t src_iovcnt,
		  struct iovec *src2_iovs, uint32_t src2_iovcnt)
{
	if (spdk_unlikely(src_iovcnt != 1 || src2_iovcnt != 1)) {
		return -EINVAL;
	}

	if (spdk_unlikely(src_iovs[0].iov_len != src2_iovs[0].iov_len)) {
		return -EINVAL;
	}

	return memcmp(src_iovs[0].iov_base, src2_iovs[0].iov_base, src_iovs[0].iov_len);
}

static int
_sw_accel_fill(struct iovec *iovs, uint32_t iovcnt, uint8_t fill)
{
	void *dst;
	size_t nbytes;

	if (spdk_unlikely(iovcnt != 1)) {
		return -EINVAL;
	}

	dst = iovs[0].iov_base;
	nbytes = iovs[0].iov_len;

	memset(dst, fill, nbytes);

	return 0;
}
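
/*
 * CRC-32C is defined with an all-ones initial value: seeding the calculation
 * with ~seed means the accel API's conventional seed of 0 becomes the
 * standard 0xFFFFFFFF starting state expected by spdk_crc32c_iov_update().
 */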
static void
_sw_accel_crc32cv(uint32_t *crc_dst, struct iovec *iov, uint32_t iovcnt, uint32_t seed)
{
	*crc_dst = spdk_crc32c_iov_update(iov, iovcnt, ~seed);
}
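
/*
 * Compression runs ISA-L's streaming deflate across both scatter-gather
 * lists: src iovecs are fed in as avail_in drains, dst iovecs are swapped in
 * as avail_out fills, and end_of_stream is raised once only the final source
 * segment remains. A minimal single-buffer sketch of the same ISA-L pattern
 * (buffer names local to this example):
 *
 *	struct isal_zstream strm;
 *	isal_deflate_init(&strm);
 *	strm.end_of_stream = 1;
 *	strm.next_in = src_buf;   strm.avail_in = src_len;
 *	strm.next_out = dst_buf;  strm.avail_out = dst_len;
 *	rc = isal_deflate(&strm); // COMP_OK (0) on success
 */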
static int
_sw_accel_compress(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
#ifdef SPDK_CONFIG_ISAL
	size_t last_seglen = accel_task->s.iovs[accel_task->s.iovcnt - 1].iov_len;
	struct iovec *siov = accel_task->s.iovs;
	struct iovec *diov = accel_task->d.iovs;
	size_t remaining;
	uint32_t i, s = 0, d = 0;
	uint32_t avail_in_before;
	int rc = 0;

	remaining = 0;
	for (i = 0; i < accel_task->s.iovcnt; ++i) {
		remaining += accel_task->s.iovs[i].iov_len;
	}

	isal_deflate_reset(&sw_ch->stream);
	sw_ch->stream.end_of_stream = 0;
	sw_ch->stream.next_out = diov[d].iov_base;
	sw_ch->stream.avail_out = diov[d].iov_len;
	sw_ch->stream.next_in = siov[s].iov_base;
	sw_ch->stream.avail_in = siov[s].iov_len;

	do {
		/* if isal has exhausted the current dst iovec, move to the next
		 * one if there is one */
		if (sw_ch->stream.avail_out == 0) {
			if (++d < accel_task->d.iovcnt) {
				sw_ch->stream.next_out = diov[d].iov_base;
				sw_ch->stream.avail_out = diov[d].iov_len;
				assert(sw_ch->stream.avail_out > 0);
			} else {
				/* We have no avail_out and no more iovecs, so either the
				 * output buffer was a perfect fit or not enough was
				 * provided. Check the ISAL state to determine which. */
				if (sw_ch->stream.internal_state.state != ZSTATE_END) {
					SPDK_ERRLOG("Not enough destination buffer provided.\n");
					rc = -ENOMEM;
				}
				break;
			}
		}

		/* if isal has exhausted the current src iovec, move to the next
		 * one if there is one */
		if (sw_ch->stream.avail_in == 0 && ((s + 1) < accel_task->s.iovcnt)) {
			s++;
			sw_ch->stream.next_in = siov[s].iov_base;
			sw_ch->stream.avail_in = siov[s].iov_len;
			assert(sw_ch->stream.avail_in > 0);
		}

		if (remaining <= last_seglen) {
			/* Need to set end of stream on last block */
			sw_ch->stream.end_of_stream = 1;
		}

		avail_in_before = sw_ch->stream.avail_in;
		rc = isal_deflate(&sw_ch->stream);
		if (rc) {
			SPDK_ERRLOG("isal_deflate returned error %d.\n", rc);
		}

		/* Subtract only what this isal_deflate() call consumed, so input
		 * left over in a partially consumed iovec is not counted twice. */
		assert(avail_in_before >= sw_ch->stream.avail_in);
		remaining -= (avail_in_before - sw_ch->stream.avail_in);

	} while (remaining > 0 || sw_ch->stream.avail_out == 0);
	assert(sw_ch->stream.avail_in == 0);

	/* Get our total output size */
	if (accel_task->output_size != NULL) {
		assert(sw_ch->stream.total_out > 0);
		*accel_task->output_size = sw_ch->stream.total_out;
	}

	return rc;
#else
	SPDK_ERRLOG("ISAL option is required to use software compression.\n");
	return -EINVAL;
#endif
}

static int
_sw_accel_decompress(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
#ifdef SPDK_CONFIG_ISAL
	struct iovec *siov = accel_task->s.iovs;
	struct iovec *diov = accel_task->d.iovs;
	uint32_t s = 0, d = 0;
	int rc = 0;

	isal_inflate_reset(&sw_ch->state);
	sw_ch->state.next_out = diov[d].iov_base;
	sw_ch->state.avail_out = diov[d].iov_len;
	sw_ch->state.next_in = siov[s].iov_base;
	sw_ch->state.avail_in = siov[s].iov_len;

	do {
		/* if isal has exhausted the current dst iovec, move to the next
		 * one if there is one */
		if (sw_ch->state.avail_out == 0 && ((d + 1) < accel_task->d.iovcnt)) {
			d++;
			sw_ch->state.next_out = diov[d].iov_base;
			sw_ch->state.avail_out = diov[d].iov_len;
			assert(sw_ch->state.avail_out > 0);
		}

		/* if isal has exhausted the current src iovec, move to the next
		 * one if there is one */
		if (sw_ch->state.avail_in == 0 && ((s + 1) < accel_task->s.iovcnt)) {
			s++;
			sw_ch->state.next_in = siov[s].iov_base;
			sw_ch->state.avail_in = siov[s].iov_len;
			assert(sw_ch->state.avail_in > 0);
		}

		rc = isal_inflate(&sw_ch->state);
		if (rc) {
			SPDK_ERRLOG("isal_inflate returned error %d.\n", rc);
		}

	} while (sw_ch->state.block_state < ISAL_BLOCK_FINISH);
	assert(sw_ch->state.avail_in == 0);

	/* Get our total output size */
	if (accel_task->output_size != NULL) {
		assert(sw_ch->state.total_out > 0);
		*accel_task->output_size = sw_ch->state.total_out;
	}

	return rc;
#else
	SPDK_ERRLOG("ISAL option is required to use software decompression.\n");
	return -EINVAL;
#endif
}
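
/*
 * Worked example of the tweak handling below (hypothetical numbers): with
 * block_size = 512 and source iovecs of 256 and 768 bytes, the loop issues
 * ops of 256 + 256 bytes (the first logical block, split at the iovec
 * boundary) and then 512 bytes; iv[1] is incremented only after each full
 * 512-byte block has been processed, per XTS data-unit semantics.
 */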
static int
_sw_accel_crypto_operation(struct spdk_accel_task *accel_task, struct spdk_accel_crypto_key *key,
			   sw_accel_crypto_op op)
{
#ifdef SPDK_CONFIG_ISAL_CRYPTO
	uint64_t iv[2];
	size_t remaining_len, dst_len;
	uint64_t src_offset = 0, dst_offset = 0;
	uint32_t src_iovpos = 0, dst_iovpos = 0, src_iovcnt, dst_iovcnt;
	uint32_t i, block_size, crypto_len, crypto_accum_len = 0;
	struct iovec *src_iov, *dst_iov;
	uint8_t *src, *dst;

	/* iv is 128 bits; since the logical block address (64 bits) is used as the iv,
	 * fill the first 8 bytes with zeroes */
	iv[0] = 0;
	iv[1] = accel_task->iv;
	src_iov = accel_task->s.iovs;
	src_iovcnt = accel_task->s.iovcnt;
	if (accel_task->d.iovcnt) {
		dst_iov = accel_task->d.iovs;
		dst_iovcnt = accel_task->d.iovcnt;
	} else {
		/* inplace operation */
		dst_iov = accel_task->s.iovs;
		dst_iovcnt = accel_task->s.iovcnt;
	}
	block_size = accel_task->block_size;

	if (!src_iovcnt || !dst_iovcnt || !block_size || !op) {
		SPDK_ERRLOG("src_iovcnt %u, dst_iovcnt %u, block_size %u, op %p\n", src_iovcnt, dst_iovcnt,
			    block_size, (void *)op);
		return -EINVAL;
	}

	remaining_len = 0;
	for (i = 0; i < src_iovcnt; i++) {
		remaining_len += src_iov[i].iov_len;
	}
	dst_len = 0;
	for (i = 0; i < dst_iovcnt; i++) {
		dst_len += dst_iov[i].iov_len;
	}

	if (spdk_unlikely(remaining_len != dst_len || !remaining_len)) {
		return -ERANGE;
	}
	if (spdk_unlikely(remaining_len % block_size != 0)) {
		return -EINVAL;
	}

	while (remaining_len) {
		crypto_len = spdk_min(block_size - crypto_accum_len, src_iov->iov_len - src_offset);
		crypto_len = spdk_min(crypto_len, dst_iov->iov_len - dst_offset);
		src = (uint8_t *)src_iov->iov_base + src_offset;
		dst = (uint8_t *)dst_iov->iov_base + dst_offset;

		op((uint8_t *)key->key2, (uint8_t *)key->key, (uint8_t *)iv, crypto_len, src, dst);

		src_offset += crypto_len;
		dst_offset += crypto_len;
		crypto_accum_len += crypto_len;
		remaining_len -= crypto_len;

		if (crypto_accum_len == block_size) {
			/* A logical block may be processed in pieces; once the whole
			 * block is done, increment the iv */
			crypto_accum_len = 0;
			iv[1]++;
		}
		if (src_offset == src_iov->iov_len) {
			src_iov++;
			src_iovpos++;
			src_offset = 0;
		}
		if (src_iovpos == src_iovcnt) {
			break;
		}
		if (dst_offset == dst_iov->iov_len) {
			dst_iov++;
			dst_iovpos++;
			dst_offset = 0;
		}
		if (dst_iovpos == dst_iovcnt) {
			break;
		}
	}

	if (remaining_len) {
		SPDK_ERRLOG("remaining len %zu\n", remaining_len);
		return -EINVAL;
	}

	return 0;
#else
	return -ENOTSUP;
#endif
}

static int
_sw_accel_encrypt(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	struct spdk_accel_crypto_key *key;
	struct sw_accel_crypto_key_data *key_data;

	key = accel_task->crypto_key;
	if (spdk_unlikely(key->module_if != &g_sw_module || !key->priv)) {
		return -EINVAL;
	}
	if (spdk_unlikely(accel_task->block_size > ACCEL_AES_XTS_MAX_BLOCK_SIZE)) {
		SPDK_WARNLOG("Max block size for AES_XTS is limited to %u, current size %u\n",
			     ACCEL_AES_XTS_MAX_BLOCK_SIZE, accel_task->block_size);
		return -ERANGE;
	}
	key_data = key->priv;
	return _sw_accel_crypto_operation(accel_task, key, key_data->encrypt);
}

static int
_sw_accel_decrypt(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	struct spdk_accel_crypto_key *key;
	struct sw_accel_crypto_key_data *key_data;

	key = accel_task->crypto_key;
	if (spdk_unlikely(key->module_if != &g_sw_module || !key->priv)) {
		return -EINVAL;
	}
	if (spdk_unlikely(accel_task->block_size > ACCEL_AES_XTS_MAX_BLOCK_SIZE)) {
		SPDK_WARNLOG("Max block size for AES_XTS is limited to %u, current size %u\n",
			     ACCEL_AES_XTS_MAX_BLOCK_SIZE, accel_task->block_size);
		return -ERANGE;
	}
	key_data = key->priv;
	return _sw_accel_crypto_operation(accel_task, key, key_data->decrypt);
}
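
/* The software XOR path consumes the flat nsrcs source-pointer array and
 * writes only the first destination iovec, so callers must provide a single
 * contiguous destination buffer. */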
static int
_sw_accel_xor(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	return spdk_xor_gen(accel_task->d.iovs[0].iov_base,
			    accel_task->nsrcs.srcs,
			    accel_task->nsrcs.cnt,
			    accel_task->d.iovs[0].iov_len);
}
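
/*
 * Tasks may arrive already chained through their TAILQ link field; the loop
 * below executes each one synchronously and queues it for deferred
 * completion. It always returns 0 because per-task errors are reported
 * through each task's completion status, not the return value.
 */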
static int
sw_accel_submit_tasks(struct spdk_io_channel *ch, struct spdk_accel_task *accel_task)
{
	struct sw_accel_io_channel *sw_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *tmp;
	int rc;

	do {
		/* Reset the status for each task so an error from a previous task
		 * in the chain is not reported against ops that cannot fail. */
		rc = 0;
		switch (accel_task->op_code) {
		case ACCEL_OPC_COPY:
			_sw_accel_copy_iovs(accel_task->d.iovs, accel_task->d.iovcnt,
					    accel_task->s.iovs, accel_task->s.iovcnt);
			break;
		case ACCEL_OPC_FILL:
			rc = _sw_accel_fill(accel_task->d.iovs, accel_task->d.iovcnt,
					    accel_task->fill_pattern);
			break;
		case ACCEL_OPC_DUALCAST:
			rc = _sw_accel_dualcast_iovs(accel_task->d.iovs, accel_task->d.iovcnt,
						     accel_task->d2.iovs, accel_task->d2.iovcnt,
						     accel_task->s.iovs, accel_task->s.iovcnt);
			break;
		case ACCEL_OPC_COMPARE:
			rc = _sw_accel_compare(accel_task->s.iovs, accel_task->s.iovcnt,
					       accel_task->s2.iovs, accel_task->s2.iovcnt);
			break;
		case ACCEL_OPC_CRC32C:
			_sw_accel_crc32cv(accel_task->crc_dst, accel_task->s.iovs,
					  accel_task->s.iovcnt, accel_task->seed);
			break;
		case ACCEL_OPC_COPY_CRC32C:
			_sw_accel_copy_iovs(accel_task->d.iovs, accel_task->d.iovcnt,
					    accel_task->s.iovs, accel_task->s.iovcnt);
			_sw_accel_crc32cv(accel_task->crc_dst, accel_task->s.iovs,
					  accel_task->s.iovcnt, accel_task->seed);
			break;
		case ACCEL_OPC_COMPRESS:
			rc = _sw_accel_compress(sw_ch, accel_task);
			break;
		case ACCEL_OPC_DECOMPRESS:
			rc = _sw_accel_decompress(sw_ch, accel_task);
			break;
		case ACCEL_OPC_XOR:
			rc = _sw_accel_xor(sw_ch, accel_task);
			break;
		case ACCEL_OPC_ENCRYPT:
			rc = _sw_accel_encrypt(sw_ch, accel_task);
			break;
		case ACCEL_OPC_DECRYPT:
			rc = _sw_accel_decrypt(sw_ch, accel_task);
			break;
		default:
			assert(false);
			break;
		}

		tmp = TAILQ_NEXT(accel_task, link);

		_add_to_comp_list(sw_ch, accel_task, rc);

		accel_task = tmp;
	} while (accel_task);

	return 0;
}

static struct spdk_io_channel *sw_accel_get_io_channel(void);
static int sw_accel_module_init(void);
static void sw_accel_module_fini(void *ctxt);
static size_t sw_accel_module_get_ctx_size(void);

static struct spdk_accel_module_if g_sw_module = {
	.module_init		= sw_accel_module_init,
	.module_fini		= sw_accel_module_fini,
	.write_config_json	= NULL,
	.get_ctx_size		= sw_accel_module_get_ctx_size,
	.name			= "software",
	.supports_opcode	= sw_accel_supports_opcode,
	.get_io_channel		= sw_accel_get_io_channel,
	.submit_tasks		= sw_accel_submit_tasks,
	.crypto_key_init	= sw_accel_crypto_key_init,
	.crypto_key_deinit	= sw_accel_crypto_key_deinit,
	.crypto_supports_tweak_mode	= sw_accel_crypto_supports_tweak_mode,
	.crypto_supports_cipher	= sw_accel_crypto_supports_cipher,
};
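
/*
 * Swapping the pending list into a local head lets completion callbacks
 * safely queue new tasks onto sw_ch->tasks_to_complete while this poller is
 * still draining the batch it claimed.
 */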
static int
accel_comp_poll(void *arg)
{
	struct sw_accel_io_channel	*sw_ch = arg;
	TAILQ_HEAD(, spdk_accel_task)	tasks_to_complete;
	struct spdk_accel_task		*accel_task;

	if (TAILQ_EMPTY(&sw_ch->tasks_to_complete)) {
		return SPDK_POLLER_IDLE;
	}

	TAILQ_INIT(&tasks_to_complete);
	TAILQ_SWAP(&tasks_to_complete, &sw_ch->tasks_to_complete, spdk_accel_task, link);

	while ((accel_task = TAILQ_FIRST(&tasks_to_complete))) {
		TAILQ_REMOVE(&tasks_to_complete, accel_task, link);
		spdk_accel_task_complete(accel_task, accel_task->status);
	}

	return SPDK_POLLER_BUSY;
}
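
/* ISA-L's level-1 deflate requires a caller-provided working buffer
 * (ISAL_DEF_LVL1_DEFAULT is its default size); it is allocated per channel
 * here and freed in sw_accel_destroy_cb(). */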
static int
sw_accel_create_cb(void *io_device, void *ctx_buf)
{
	struct sw_accel_io_channel *sw_ch = ctx_buf;

	TAILQ_INIT(&sw_ch->tasks_to_complete);
	sw_ch->completion_poller = SPDK_POLLER_REGISTER(accel_comp_poll, sw_ch, 0);

#ifdef SPDK_CONFIG_ISAL
	isal_deflate_init(&sw_ch->stream);
	sw_ch->stream.flush = NO_FLUSH;
	sw_ch->stream.level = 1;
	sw_ch->stream.level_buf = calloc(1, ISAL_DEF_LVL1_DEFAULT);
	if (sw_ch->stream.level_buf == NULL) {
		SPDK_ERRLOG("Could not allocate isal internal buffer\n");
		/* The destroy callback does not run after a failed create, so
		 * release the poller here to avoid leaking it. */
		spdk_poller_unregister(&sw_ch->completion_poller);
		return -ENOMEM;
	}
	sw_ch->stream.level_buf_size = ISAL_DEF_LVL1_DEFAULT;
	isal_inflate_init(&sw_ch->state);
#endif

	return 0;
}

static void
sw_accel_destroy_cb(void *io_device, void *ctx_buf)
{
	struct sw_accel_io_channel *sw_ch = ctx_buf;

#ifdef SPDK_CONFIG_ISAL
	free(sw_ch->stream.level_buf);
#endif

	spdk_poller_unregister(&sw_ch->completion_poller);
}

static struct spdk_io_channel *
sw_accel_get_io_channel(void)
{
	return spdk_get_io_channel(&g_sw_module);
}

static size_t
sw_accel_module_get_ctx_size(void)
{
	return sizeof(struct spdk_accel_task);
}

static int
sw_accel_module_init(void)
{
	spdk_io_device_register(&g_sw_module, sw_accel_create_cb, sw_accel_destroy_cb,
				sizeof(struct sw_accel_io_channel), "sw_accel_module");

	return 0;
}

static void
sw_accel_module_fini(void *ctxt)
{
	spdk_io_device_unregister(&g_sw_module, NULL);
	spdk_accel_module_finish();
}
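
/* The sw_accel_crypto_op typedef above mirrors the isa-l-crypto XTS
 * prototypes, e.g. XTS_AES_128_enc(k2, k1, tweak, len, src, dst), so the
 * library functions can be stored and invoked directly. */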
static int
sw_accel_create_aes_xts(struct spdk_accel_crypto_key *key)
{
#ifdef SPDK_CONFIG_ISAL_CRYPTO
	struct sw_accel_crypto_key_data *key_data;

	key_data = calloc(1, sizeof(*key_data));
	if (!key_data) {
		return -ENOMEM;
	}

	switch (key->key_size) {
	case SPDK_ACCEL_AES_XTS_128_KEY_SIZE:
		key_data->encrypt = XTS_AES_128_enc;
		key_data->decrypt = XTS_AES_128_dec;
		break;
	case SPDK_ACCEL_AES_XTS_256_KEY_SIZE:
		key_data->encrypt = XTS_AES_256_enc;
		key_data->decrypt = XTS_AES_256_dec;
		break;
	default:
		assert(0);
		free(key_data);
		return -EINVAL;
	}

	key->priv = key_data;

	return 0;
#else
	return -ENOTSUP;
#endif
}

static int
sw_accel_crypto_key_init(struct spdk_accel_crypto_key *key)
{
	return sw_accel_create_aes_xts(key);
}

static void
sw_accel_crypto_key_deinit(struct spdk_accel_crypto_key *key)
{
	if (!key || key->module_if != &g_sw_module || !key->priv) {
		return;
	}

	free(key->priv);
}

static bool
sw_accel_crypto_supports_tweak_mode(enum spdk_accel_crypto_tweak_mode tweak_mode)
{
	return tweak_mode == SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA;
}

static bool
sw_accel_crypto_supports_cipher(enum spdk_accel_cipher cipher, size_t key_size)
{
	switch (cipher) {
	case SPDK_ACCEL_CIPHER_AES_XTS:
		return key_size == SPDK_ACCEL_AES_XTS_128_KEY_SIZE ||
		       key_size == SPDK_ACCEL_AES_XTS_256_KEY_SIZE;
	default:
		return false;
	}
}

SPDK_ACCEL_MODULE_REGISTER(sw, &g_sw_module)
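
/*
 * Usage sketch (hypothetical application code, assuming the accel public API
 * of this revision): the module is reached through the generic framework, e.g.
 *
 *	struct spdk_io_channel *ch = spdk_accel_get_io_channel();
 *	int rc = spdk_accel_submit_copy(ch, dst, src, nbytes, 0,
 *					copy_done_cb, cb_arg);
 *
 * When no hardware module claims ACCEL_OPC_COPY, the framework routes the
 * request here and copy_done_cb later runs from accel_comp_poll().
 */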