/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2022 Intel Corporation.
 *   Copyright (c) 2022, 2023 NVIDIA CORPORATION & AFFILIATES
 *   All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/accel_module.h"
#include "accel_internal.h"

#include "spdk/env.h"
#include "spdk/likely.h"
#include "spdk/log.h"
#include "spdk/thread.h"
#include "spdk/json.h"
#include "spdk/crc32.h"
#include "spdk/util.h"
#include "spdk/xor.h"
#include "spdk/dif.h"

#ifdef SPDK_CONFIG_ISAL
#include "../isa-l/include/igzip_lib.h"
#ifdef SPDK_CONFIG_ISAL_CRYPTO
#include "../isa-l-crypto/include/aes_xts.h"
#endif
#endif

/* Per the AES-XTS spec, a data unit is at most 2^20 blocks of 128 bits each, i.e. 2^24 bytes */
#define ACCEL_AES_XTS_MAX_BLOCK_SIZE (1 << 24)

struct sw_accel_io_channel {
	/* for ISAL */
#ifdef SPDK_CONFIG_ISAL
	struct isal_zstream		stream;
	struct inflate_state		state;
#endif
	struct spdk_poller		*completion_poller;
	STAILQ_HEAD(, spdk_accel_task)	tasks_to_complete;
};

typedef void (*sw_accel_crypto_op)(uint8_t *k2, uint8_t *k1, uint8_t *tweak, uint64_t lba_size,
				   const uint8_t *src, uint8_t *dst);

struct sw_accel_crypto_key_data {
	sw_accel_crypto_op encrypt;
	sw_accel_crypto_op decrypt;
};

static struct spdk_accel_module_if g_sw_module;

static void sw_accel_crypto_key_deinit(struct spdk_accel_crypto_key *_key);
static int sw_accel_crypto_key_init(struct spdk_accel_crypto_key *key);
static bool sw_accel_crypto_supports_tweak_mode(enum spdk_accel_crypto_tweak_mode tweak_mode);
static bool sw_accel_crypto_supports_cipher(enum spdk_accel_cipher cipher, size_t key_size);

/* Post SW completions to a list; processed by ->completion_poller. */
inline static void
_add_to_comp_list(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task, int status)
{
	accel_task->status = status;
	STAILQ_INSERT_TAIL(&sw_ch->tasks_to_complete, accel_task, link);
}

static bool
sw_accel_supports_opcode(enum spdk_accel_opcode opc)
{
	switch (opc) {
	case SPDK_ACCEL_OPC_COPY:
	case SPDK_ACCEL_OPC_FILL:
	case SPDK_ACCEL_OPC_DUALCAST:
	case SPDK_ACCEL_OPC_COMPARE:
	case SPDK_ACCEL_OPC_CRC32C:
	case SPDK_ACCEL_OPC_COPY_CRC32C:
	case SPDK_ACCEL_OPC_COMPRESS:
	case SPDK_ACCEL_OPC_DECOMPRESS:
	case SPDK_ACCEL_OPC_ENCRYPT:
	case SPDK_ACCEL_OPC_DECRYPT:
	case SPDK_ACCEL_OPC_XOR:
	case SPDK_ACCEL_OPC_DIF_VERIFY:
	case SPDK_ACCEL_OPC_DIF_GENERATE:
	case SPDK_ACCEL_OPC_DIF_GENERATE_COPY:
	case SPDK_ACCEL_OPC_DIF_VERIFY_COPY:
		return true;
	default:
		return false;
	}
}

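/*
 * Dualcast: copy a single source buffer to two destination buffers of equal
 * length. The software path only handles the single-iovec case; scattered
 * buffers are rejected with -EINVAL.
 */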
static int
_sw_accel_dualcast_iovs(struct iovec *dst_iovs, uint32_t dst_iovcnt,
			struct iovec *dst2_iovs, uint32_t dst2_iovcnt,
			struct iovec *src_iovs, uint32_t src_iovcnt)
{
	if (spdk_unlikely(dst_iovcnt != 1 || dst2_iovcnt != 1 || src_iovcnt != 1)) {
		return -EINVAL;
	}

	if (spdk_unlikely(dst_iovs[0].iov_len != src_iovs[0].iov_len ||
			  dst_iovs[0].iov_len != dst2_iovs[0].iov_len)) {
		return -EINVAL;
	}

	memcpy(dst_iovs[0].iov_base, src_iovs[0].iov_base, dst_iovs[0].iov_len);
	memcpy(dst2_iovs[0].iov_base, src_iovs[0].iov_base, dst_iovs[0].iov_len);

	return 0;
}

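/*
 * Copy between two arbitrary iovec arrays. spdk_ioviter yields, on each
 * iteration, the largest contiguous chunk shared by the current source and
 * destination elements, so the two arrays may be split differently.
 */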
static void
_sw_accel_copy_iovs(struct iovec *dst_iovs, uint32_t dst_iovcnt,
		    struct iovec *src_iovs, uint32_t src_iovcnt)
{
	struct spdk_ioviter iter;
	void *src, *dst;
	size_t len;

	for (len = spdk_ioviter_first(&iter, src_iovs, src_iovcnt,
				      dst_iovs, dst_iovcnt, &src, &dst);
	     len != 0;
	     len = spdk_ioviter_next(&iter, &src, &dst)) {
		memcpy(dst, src, len);
	}
}

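/*
 * Compare two single-iovec buffers of equal length. The memcmp() result is
 * returned directly, so a miscompare surfaces as a nonzero task status
 * rather than a negative errno.
 */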
static int
_sw_accel_compare(struct iovec *src_iovs, uint32_t src_iovcnt,
		  struct iovec *src2_iovs, uint32_t src2_iovcnt)
{
	if (spdk_unlikely(src_iovcnt != 1 || src2_iovcnt != 1)) {
		return -EINVAL;
	}

	if (spdk_unlikely(src_iovs[0].iov_len != src2_iovs[0].iov_len)) {
		return -EINVAL;
	}

	return memcmp(src_iovs[0].iov_base, src2_iovs[0].iov_base, src_iovs[0].iov_len);
}

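/* Fill a single buffer with a repeating one-byte pattern; iovcnt must be 1. */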
static int
_sw_accel_fill(struct iovec *iovs, uint32_t iovcnt, uint8_t fill)
{
	void *dst;
	size_t nbytes;

	if (spdk_unlikely(iovcnt != 1)) {
		return -EINVAL;
	}

	dst = iovs[0].iov_base;
	nbytes = iovs[0].iov_len;

	memset(dst, fill, nbytes);

	return 0;
}

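/*
 * CRC-32C over an iovec array. The seed is bit-inverted before being passed
 * to spdk_crc32c_iov_update(), since the accel API's seed and the crc32
 * helper's running value use opposite (inverted) conventions.
 */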
static void
_sw_accel_crc32cv(uint32_t *crc_dst, struct iovec *iov, uint32_t iovcnt, uint32_t seed)
{
	*crc_dst = spdk_crc32c_iov_update(iov, iovcnt, ~seed);
}

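/*
 * Compress using ISA-L's streaming deflate. Source and destination iovecs
 * are fed to the stream one element at a time; end_of_stream is raised once
 * the remaining input fits in the final source element.
 */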
static int
_sw_accel_compress(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
#ifdef SPDK_CONFIG_ISAL
	size_t last_seglen = accel_task->s.iovs[accel_task->s.iovcnt - 1].iov_len;
	struct iovec *siov = accel_task->s.iovs;
	struct iovec *diov = accel_task->d.iovs;
	size_t remaining;
	uint32_t i, s = 0, d = 0;
	int rc = 0;

	remaining = 0;
	for (i = 0; i < accel_task->s.iovcnt; ++i) {
		remaining += accel_task->s.iovs[i].iov_len;
	}

	isal_deflate_reset(&sw_ch->stream);
	sw_ch->stream.end_of_stream = 0;
	sw_ch->stream.next_out = diov[d].iov_base;
	sw_ch->stream.avail_out = diov[d].iov_len;
	sw_ch->stream.next_in = siov[s].iov_base;
	sw_ch->stream.avail_in = siov[s].iov_len;

	do {
		/* if isal has exhausted the current dst iovec, move to the next
		 * one if there is one */
		if (sw_ch->stream.avail_out == 0) {
			if (++d < accel_task->d.iovcnt) {
				sw_ch->stream.next_out = diov[d].iov_base;
				sw_ch->stream.avail_out = diov[d].iov_len;
				assert(sw_ch->stream.avail_out > 0);
			} else {
				/* We have no avail_out and no more iovecs, so either the
				 * output buffer was a perfect fit or not enough was
				 * provided. Check the ISA-L state to determine which. */
				if (sw_ch->stream.internal_state.state != ZSTATE_END) {
					SPDK_ERRLOG("Not enough destination buffer provided.\n");
					rc = -ENOMEM;
				}
				break;
			}
		}

		/* if isal has exhausted the current src iovec, move to the next
		 * one if there is one */
		if (sw_ch->stream.avail_in == 0 && ((s + 1) < accel_task->s.iovcnt)) {
			s++;
			sw_ch->stream.next_in = siov[s].iov_base;
			sw_ch->stream.avail_in = siov[s].iov_len;
			assert(sw_ch->stream.avail_in > 0);
		}

		if (remaining <= last_seglen) {
			/* Need to set end of stream on last block */
			sw_ch->stream.end_of_stream = 1;
		}

		rc = isal_deflate(&sw_ch->stream);
		if (rc) {
			SPDK_ERRLOG("isal_deflate returned error %d.\n", rc);
		}

		if (remaining > 0) {
			assert(siov[s].iov_len > sw_ch->stream.avail_in);
			remaining -= (siov[s].iov_len - sw_ch->stream.avail_in);
		}

	} while (remaining > 0 || sw_ch->stream.avail_out == 0);
	assert(sw_ch->stream.avail_in == 0);

	/* Get our total output size */
	if (accel_task->output_size != NULL) {
		assert(sw_ch->stream.total_out > 0);
		*accel_task->output_size = sw_ch->stream.total_out;
	}

	return rc;
#else
	SPDK_ERRLOG("ISAL option is required to use software compression.\n");
	return -EINVAL;
#endif
}

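/*
 * Decompress using ISA-L's streaming inflate, advancing through the source
 * and destination iovecs until ISA-L reports that the final block is done.
 */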
static int
_sw_accel_decompress(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
#ifdef SPDK_CONFIG_ISAL
	struct iovec *siov = accel_task->s.iovs;
	struct iovec *diov = accel_task->d.iovs;
	uint32_t s = 0, d = 0;
	int rc = 0;

	isal_inflate_reset(&sw_ch->state);
	sw_ch->state.next_out = diov[d].iov_base;
	sw_ch->state.avail_out = diov[d].iov_len;
	sw_ch->state.next_in = siov[s].iov_base;
	sw_ch->state.avail_in = siov[s].iov_len;

	do {
		/* if isal has exhausted the current dst iovec, move to the next
		 * one if there is one */
		if (sw_ch->state.avail_out == 0 && ((d + 1) < accel_task->d.iovcnt)) {
			d++;
			sw_ch->state.next_out = diov[d].iov_base;
			sw_ch->state.avail_out = diov[d].iov_len;
			assert(sw_ch->state.avail_out > 0);
		}

		/* if isal has exhausted the current src iovec, move to the next
		 * one if there is one */
		if (sw_ch->state.avail_in == 0 && ((s + 1) < accel_task->s.iovcnt)) {
			s++;
			sw_ch->state.next_in = siov[s].iov_base;
			sw_ch->state.avail_in = siov[s].iov_len;
			assert(sw_ch->state.avail_in > 0);
		}

		rc = isal_inflate(&sw_ch->state);
		if (rc) {
			SPDK_ERRLOG("isal_inflate returned error %d.\n", rc);
		}

	} while (sw_ch->state.block_state < ISAL_BLOCK_FINISH);
	assert(sw_ch->state.avail_in == 0);

	/* Get our total output size */
	if (accel_task->output_size != NULL) {
		assert(sw_ch->state.total_out > 0);
		*accel_task->output_size = sw_ch->state.total_out;
	}

	return rc;
#else
	SPDK_ERRLOG("ISAL option is required to use software decompression.\n");
	return -EINVAL;
#endif
}

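/*
 * Perform AES-XTS over the task's iovecs, using the logical block address as
 * the tweak. A logical block (data unit) may straddle iovec boundaries, so
 * each inner call covers the largest chunk contiguous in both src and dst;
 * the tweak is incremented only once block_size bytes have been processed.
 * If no destination iovecs are given, the operation is done in place.
 */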
static int
_sw_accel_crypto_operation(struct spdk_accel_task *accel_task, struct spdk_accel_crypto_key *key,
			   sw_accel_crypto_op op)
{
#ifdef SPDK_CONFIG_ISAL_CRYPTO
	uint64_t iv[2];
	size_t remaining_len, dst_len;
	uint64_t src_offset = 0, dst_offset = 0;
	uint32_t src_iovpos = 0, dst_iovpos = 0, src_iovcnt, dst_iovcnt;
	uint32_t i, block_size, crypto_len, crypto_accum_len = 0;
	struct iovec *src_iov, *dst_iov;
	uint8_t *src, *dst;

	/* The IV is 128 bits; since the 64-bit logical block address serves as the IV, zero the first 8 bytes */
	iv[0] = 0;
	iv[1] = accel_task->iv;
	src_iov = accel_task->s.iovs;
	src_iovcnt = accel_task->s.iovcnt;
	if (accel_task->d.iovcnt) {
		dst_iov = accel_task->d.iovs;
		dst_iovcnt = accel_task->d.iovcnt;
	} else {
		/* in-place operation */
		dst_iov = accel_task->s.iovs;
		dst_iovcnt = accel_task->s.iovcnt;
	}
	block_size = accel_task->block_size;

	if (!src_iovcnt || !dst_iovcnt || !block_size || !op) {
		SPDK_ERRLOG("src_iovcnt %u, dst_iovcnt %u, block_size %u, op %p\n", src_iovcnt, dst_iovcnt,
			    block_size, op);
		return -EINVAL;
	}

	remaining_len = 0;
	for (i = 0; i < src_iovcnt; i++) {
		remaining_len += src_iov[i].iov_len;
	}
	dst_len = 0;
	for (i = 0; i < dst_iovcnt; i++) {
		dst_len += dst_iov[i].iov_len;
	}

	if (spdk_unlikely(remaining_len != dst_len || !remaining_len)) {
		return -ERANGE;
	}
	if (spdk_unlikely(remaining_len % accel_task->block_size != 0)) {
		return -EINVAL;
	}

	while (remaining_len) {
		crypto_len = spdk_min(block_size - crypto_accum_len, src_iov->iov_len - src_offset);
		crypto_len = spdk_min(crypto_len, dst_iov->iov_len - dst_offset);
		src = (uint8_t *)src_iov->iov_base + src_offset;
		dst = (uint8_t *)dst_iov->iov_base + dst_offset;

		op((uint8_t *)key->key2, (uint8_t *)key->key, (uint8_t *)iv, crypto_len, src, dst);

		src_offset += crypto_len;
		dst_offset += crypto_len;
		crypto_accum_len += crypto_len;
		remaining_len -= crypto_len;

		if (crypto_accum_len == block_size) {
			/* We may process only part of a logical block at a time; once the
			 * whole block has been processed, increment the IV. */
			crypto_accum_len = 0;
			iv[1]++;
		}
		if (src_offset == src_iov->iov_len) {
			src_iov++;
			src_iovpos++;
			src_offset = 0;
		}
		if (src_iovpos == src_iovcnt) {
			break;
		}
		if (dst_offset == dst_iov->iov_len) {
			dst_iov++;
			dst_iovpos++;
			dst_offset = 0;
		}
		if (dst_iovpos == dst_iovcnt) {
			break;
		}
	}

	if (remaining_len) {
		SPDK_ERRLOG("remaining len %zu\n", remaining_len);
		return -EINVAL;
	}

	return 0;
#else
	return -ENOTSUP;
#endif
}

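/*
 * The encrypt/decrypt wrappers below validate that the key belongs to this
 * module and that block_size stays within the AES-XTS data unit limit before
 * dispatching to the key's ISA-L routine.
 */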
static int
_sw_accel_encrypt(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	struct spdk_accel_crypto_key *key;
	struct sw_accel_crypto_key_data *key_data;

	key = accel_task->crypto_key;
	if (spdk_unlikely(key->module_if != &g_sw_module || !key->priv)) {
		return -EINVAL;
	}
	if (spdk_unlikely(accel_task->block_size > ACCEL_AES_XTS_MAX_BLOCK_SIZE)) {
		SPDK_WARNLOG("Max block size for AES_XTS is limited to %u, current size %u\n",
			     ACCEL_AES_XTS_MAX_BLOCK_SIZE, accel_task->block_size);
		return -ERANGE;
	}
	key_data = key->priv;
	return _sw_accel_crypto_operation(accel_task, key, key_data->encrypt);
}

static int
_sw_accel_decrypt(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	struct spdk_accel_crypto_key *key;
	struct sw_accel_crypto_key_data *key_data;

	key = accel_task->crypto_key;
	if (spdk_unlikely(key->module_if != &g_sw_module || !key->priv)) {
		return -EINVAL;
	}
	if (spdk_unlikely(accel_task->block_size > ACCEL_AES_XTS_MAX_BLOCK_SIZE)) {
		SPDK_WARNLOG("Max block size for AES_XTS is limited to %u, current size %u\n",
			     ACCEL_AES_XTS_MAX_BLOCK_SIZE, accel_task->block_size);
		return -ERANGE;
	}
	key_data = key->priv;
	return _sw_accel_crypto_operation(accel_task, key, key_data->decrypt);
}

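/* XOR all source buffers into the first destination iovec. */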
static int
_sw_accel_xor(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	return spdk_xor_gen(accel_task->d.iovs[0].iov_base,
			    accel_task->nsrcs.srcs,
			    accel_task->nsrcs.cnt,
			    accel_task->d.iovs[0].iov_len);
}

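/* The DIF opcodes are thin wrappers around the spdk_dif library. */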
static int
_sw_accel_dif_verify(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	return spdk_dif_verify(accel_task->s.iovs,
			       accel_task->s.iovcnt,
			       accel_task->dif.num_blocks,
			       accel_task->dif.ctx,
			       accel_task->dif.err);
}

static int
_sw_accel_dif_verify_copy(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	return spdk_dif_verify_copy(accel_task->d.iovs,
				    accel_task->d.iovcnt,
				    accel_task->s.iovs,
				    accel_task->s.iovcnt,
				    accel_task->dif.num_blocks,
				    accel_task->dif.ctx,
				    accel_task->dif.err);
}

static int
_sw_accel_dif_generate(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	return spdk_dif_generate(accel_task->s.iovs,
				 accel_task->s.iovcnt,
				 accel_task->dif.num_blocks,
				 accel_task->dif.ctx);
}

static int
_sw_accel_dif_generate_copy(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	return spdk_dif_generate_copy(accel_task->s.iovs,
				      accel_task->s.iovcnt,
				      accel_task->d.iovs,
				      accel_task->d.iovcnt,
				      accel_task->dif.num_blocks,
				      accel_task->dif.ctx);
}

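/*
 * Drain the completion list. The list is swapped into a local copy first so
 * that completion callbacks may safely submit new tasks (which append to the
 * channel's list) while this batch is being completed.
 */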
static int
accel_comp_poll(void *arg)
{
	struct sw_accel_io_channel	*sw_ch = arg;
	STAILQ_HEAD(, spdk_accel_task)	tasks_to_complete;
	struct spdk_accel_task		*accel_task;

	if (STAILQ_EMPTY(&sw_ch->tasks_to_complete)) {
		return SPDK_POLLER_IDLE;
	}

	STAILQ_INIT(&tasks_to_complete);
	STAILQ_SWAP(&tasks_to_complete, &sw_ch->tasks_to_complete, spdk_accel_task);

	while ((accel_task = STAILQ_FIRST(&tasks_to_complete))) {
		STAILQ_REMOVE_HEAD(&tasks_to_complete, link);
		spdk_accel_task_complete(accel_task, accel_task->status);
	}

	return SPDK_POLLER_BUSY;
}

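/*
 * Execute a chain of tasks synchronously. Results are queued on the
 * completion list and reported from the poller rather than inline.
 */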
static int
sw_accel_submit_tasks(struct spdk_io_channel *ch, struct spdk_accel_task *accel_task)
{
	struct sw_accel_io_channel *sw_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *tmp;
	int rc = 0;

	/*
	 * Lazily initialize our completion poller. We don't want to complete
	 * tasks inline because their completion callbacks will likely submit
	 * more tasks.
	 */
	if (spdk_unlikely(sw_ch->completion_poller == NULL)) {
		sw_ch->completion_poller = SPDK_POLLER_REGISTER(accel_comp_poll, sw_ch, 0);
	}

	do {
		switch (accel_task->op_code) {
		case SPDK_ACCEL_OPC_COPY:
			_sw_accel_copy_iovs(accel_task->d.iovs, accel_task->d.iovcnt,
					    accel_task->s.iovs, accel_task->s.iovcnt);
			break;
		case SPDK_ACCEL_OPC_FILL:
			rc = _sw_accel_fill(accel_task->d.iovs, accel_task->d.iovcnt,
					    accel_task->fill_pattern);
			break;
		case SPDK_ACCEL_OPC_DUALCAST:
			rc = _sw_accel_dualcast_iovs(accel_task->d.iovs, accel_task->d.iovcnt,
						     accel_task->d2.iovs, accel_task->d2.iovcnt,
						     accel_task->s.iovs, accel_task->s.iovcnt);
			break;
		case SPDK_ACCEL_OPC_COMPARE:
			rc = _sw_accel_compare(accel_task->s.iovs, accel_task->s.iovcnt,
					       accel_task->s2.iovs, accel_task->s2.iovcnt);
			break;
		case SPDK_ACCEL_OPC_CRC32C:
			_sw_accel_crc32cv(accel_task->crc_dst, accel_task->s.iovs, accel_task->s.iovcnt, accel_task->seed);
			break;
		case SPDK_ACCEL_OPC_COPY_CRC32C:
			_sw_accel_copy_iovs(accel_task->d.iovs, accel_task->d.iovcnt,
					    accel_task->s.iovs, accel_task->s.iovcnt);
			_sw_accel_crc32cv(accel_task->crc_dst, accel_task->s.iovs,
					  accel_task->s.iovcnt, accel_task->seed);
			break;
		case SPDK_ACCEL_OPC_COMPRESS:
			rc = _sw_accel_compress(sw_ch, accel_task);
			break;
		case SPDK_ACCEL_OPC_DECOMPRESS:
			rc = _sw_accel_decompress(sw_ch, accel_task);
			break;
		case SPDK_ACCEL_OPC_XOR:
			rc = _sw_accel_xor(sw_ch, accel_task);
			break;
		case SPDK_ACCEL_OPC_ENCRYPT:
			rc = _sw_accel_encrypt(sw_ch, accel_task);
			break;
		case SPDK_ACCEL_OPC_DECRYPT:
			rc = _sw_accel_decrypt(sw_ch, accel_task);
			break;
		case SPDK_ACCEL_OPC_DIF_VERIFY:
			rc = _sw_accel_dif_verify(sw_ch, accel_task);
			break;
		case SPDK_ACCEL_OPC_DIF_VERIFY_COPY:
			rc = _sw_accel_dif_verify_copy(sw_ch, accel_task);
			break;
		case SPDK_ACCEL_OPC_DIF_GENERATE:
			rc = _sw_accel_dif_generate(sw_ch, accel_task);
			break;
		case SPDK_ACCEL_OPC_DIF_GENERATE_COPY:
			rc = _sw_accel_dif_generate_copy(sw_ch, accel_task);
			break;
		default:
			assert(false);
			break;
		}

		tmp = STAILQ_NEXT(accel_task, link);

		_add_to_comp_list(sw_ch, accel_task, rc);

		accel_task = tmp;
	} while (accel_task);

	return 0;
}

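/*
 * Per-channel setup: initialize the completion list and, when ISA-L is
 * available, the deflate/inflate streams. Compression runs at ISA-L level 1,
 * which requires a dedicated level buffer.
 */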
static int
sw_accel_create_cb(void *io_device, void *ctx_buf)
{
	struct sw_accel_io_channel *sw_ch = ctx_buf;

	STAILQ_INIT(&sw_ch->tasks_to_complete);
	sw_ch->completion_poller = NULL;

#ifdef SPDK_CONFIG_ISAL
	isal_deflate_init(&sw_ch->stream);
	sw_ch->stream.flush = NO_FLUSH;
	sw_ch->stream.level = 1;
	sw_ch->stream.level_buf = calloc(1, ISAL_DEF_LVL1_DEFAULT);
	if (sw_ch->stream.level_buf == NULL) {
		SPDK_ERRLOG("Could not allocate isal internal buffer\n");
		return -ENOMEM;
	}
	sw_ch->stream.level_buf_size = ISAL_DEF_LVL1_DEFAULT;
	isal_inflate_init(&sw_ch->state);
#endif

	return 0;
}

static void
sw_accel_destroy_cb(void *io_device, void *ctx_buf)
{
	struct sw_accel_io_channel *sw_ch = ctx_buf;

#ifdef SPDK_CONFIG_ISAL
	free(sw_ch->stream.level_buf);
#endif

	spdk_poller_unregister(&sw_ch->completion_poller);
}

static struct spdk_io_channel *
sw_accel_get_io_channel(void)
{
	return spdk_get_io_channel(&g_sw_module);
}

static size_t
sw_accel_module_get_ctx_size(void)
{
	return sizeof(struct spdk_accel_task);
}

static int
sw_accel_module_init(void)
{
	spdk_io_device_register(&g_sw_module, sw_accel_create_cb, sw_accel_destroy_cb,
				sizeof(struct sw_accel_io_channel), "sw_accel_module");

	return 0;
}

static void
sw_accel_module_fini(void *ctxt)
{
	spdk_io_device_unregister(&g_sw_module, NULL);
	spdk_accel_module_finish();
}

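/* Select the ISA-L XTS routines matching the key size (AES-128 or AES-256). */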
static int
sw_accel_create_aes_xts(struct spdk_accel_crypto_key *key)
{
#ifdef SPDK_CONFIG_ISAL_CRYPTO
	struct sw_accel_crypto_key_data *key_data;

	key_data = calloc(1, sizeof(*key_data));
	if (!key_data) {
		return -ENOMEM;
	}

	switch (key->key_size) {
	case SPDK_ACCEL_AES_XTS_128_KEY_SIZE:
		key_data->encrypt = XTS_AES_128_enc;
		key_data->decrypt = XTS_AES_128_dec;
		break;
	case SPDK_ACCEL_AES_XTS_256_KEY_SIZE:
		key_data->encrypt = XTS_AES_256_enc;
		key_data->decrypt = XTS_AES_256_dec;
		break;
	default:
		assert(0);
		free(key_data);
		return -EINVAL;
	}

	key->priv = key_data;

	return 0;
#else
	return -ENOTSUP;
#endif
}

static int
sw_accel_crypto_key_init(struct spdk_accel_crypto_key *key)
{
	return sw_accel_create_aes_xts(key);
}

static void
sw_accel_crypto_key_deinit(struct spdk_accel_crypto_key *key)
{
	if (!key || key->module_if != &g_sw_module || !key->priv) {
		return;
	}

	free(key->priv);
}

static bool
sw_accel_crypto_supports_tweak_mode(enum spdk_accel_crypto_tweak_mode tweak_mode)
{
	return tweak_mode == SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA;
}

static bool
sw_accel_crypto_supports_cipher(enum spdk_accel_cipher cipher, size_t key_size)
{
	switch (cipher) {
	case SPDK_ACCEL_CIPHER_AES_XTS:
		return key_size == SPDK_ACCEL_AES_XTS_128_KEY_SIZE || key_size == SPDK_ACCEL_AES_XTS_256_KEY_SIZE;
	default:
		return false;
	}
}

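/*
 * The software path imposes no buffer alignment requirements, so
 * required_alignment is reported as 0 for every opcode.
 */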
static int
sw_accel_get_operation_info(enum spdk_accel_opcode opcode,
			    const struct spdk_accel_operation_exec_ctx *ctx,
			    struct spdk_accel_opcode_info *info)
{
	info->required_alignment = 0;

	return 0;
}

static struct spdk_accel_module_if g_sw_module = {
	.module_init			= sw_accel_module_init,
	.module_fini			= sw_accel_module_fini,
	.write_config_json		= NULL,
	.get_ctx_size			= sw_accel_module_get_ctx_size,
	.name				= "software",
	.priority			= SPDK_ACCEL_SW_PRIORITY,
	.supports_opcode		= sw_accel_supports_opcode,
	.get_io_channel			= sw_accel_get_io_channel,
	.submit_tasks			= sw_accel_submit_tasks,
	.crypto_key_init		= sw_accel_crypto_key_init,
	.crypto_key_deinit		= sw_accel_crypto_key_deinit,
	.crypto_supports_tweak_mode	= sw_accel_crypto_supports_tweak_mode,
	.crypto_supports_cipher		= sw_accel_crypto_supports_cipher,
	.get_operation_info		= sw_accel_get_operation_info,
};

SPDK_ACCEL_MODULE_REGISTER(sw, &g_sw_module)