xref: /spdk/lib/accel/accel_sw.c (revision b0ca75fce1a225a920f48a0dcedf6146a2cf5cc9)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2022 Intel Corporation.
3  *   Copyright (c) 2022, 2023 NVIDIA CORPORATION & AFFILIATES
4  *   All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "spdk/accel_module.h"
10 #include "accel_internal.h"
11 
12 #include "spdk/env.h"
13 #include "spdk/likely.h"
14 #include "spdk/log.h"
15 #include "spdk/thread.h"
16 #include "spdk/json.h"
17 #include "spdk/crc32.h"
18 #include "spdk/util.h"
19 #include "spdk/xor.h"
20 #include "spdk/dif.h"
21 
22 #ifdef SPDK_CONFIG_ISAL
23 #include "../isa-l/include/igzip_lib.h"
24 #ifdef SPDK_CONFIG_ISAL_CRYPTO
25 #include "../isa-l-crypto/include/aes_xts.h"
26 #include "../isa-l-crypto/include/isal_crypto_api.h"
27 #endif
28 #endif
29 
/* Per the AES-XTS spec, a data unit cannot exceed 2^20 blocks of 128 bits (16 bytes) each,
 * i.e. 2^20 * 16 = 2^24 bytes. */
31 #define ACCEL_AES_XTS_MAX_BLOCK_SIZE (1 << 24)
32 
/* Per-channel context for the software accel module. */
struct sw_accel_io_channel {
	/* for ISAL */
#ifdef SPDK_CONFIG_ISAL
	struct isal_zstream		stream;		/* deflate (compression) state, reused across tasks */
	struct inflate_state		state;		/* inflate (decompression) state, reused across tasks */
#endif
	/* Drains tasks_to_complete; registered lazily on first task submission. */
	struct spdk_poller		*completion_poller;
	/* Tasks already executed synchronously, awaiting completion by the poller. */
	STAILQ_HEAD(, spdk_accel_task)	tasks_to_complete;
};
42 
/*
 * Signature of an ISA-L AES-XTS primitive (e.g. isal_aes_xts_enc_128):
 * k2 is the tweak key, k1 the data key, initial_tweak the 128-bit IV.
 */
typedef int (*sw_accel_crypto_op)(const uint8_t *k2, const uint8_t *k1,
				  const uint8_t *initial_tweak, const uint64_t len_bytes,
				  const void *in, void *out);

/* Key-size-specific encrypt/decrypt callbacks, stored in spdk_accel_crypto_key->priv. */
struct sw_accel_crypto_key_data {
	sw_accel_crypto_op encrypt;
	sw_accel_crypto_op decrypt;
};
51 
52 static struct spdk_accel_module_if g_sw_module;
53 
54 static void sw_accel_crypto_key_deinit(struct spdk_accel_crypto_key *_key);
55 static int sw_accel_crypto_key_init(struct spdk_accel_crypto_key *key);
56 static bool sw_accel_crypto_supports_tweak_mode(enum spdk_accel_crypto_tweak_mode tweak_mode);
57 static bool sw_accel_crypto_supports_cipher(enum spdk_accel_cipher cipher, size_t key_size);
58 
59 /* Post SW completions to a list; processed by ->completion_poller. */
60 inline static void
61 _add_to_comp_list(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task, int status)
62 {
63 	accel_task->status = status;
64 	STAILQ_INSERT_TAIL(&sw_ch->tasks_to_complete, accel_task, link);
65 }
66 
67 static bool
68 sw_accel_supports_opcode(enum spdk_accel_opcode opc)
69 {
70 	switch (opc) {
71 	case SPDK_ACCEL_OPC_COPY:
72 	case SPDK_ACCEL_OPC_FILL:
73 	case SPDK_ACCEL_OPC_DUALCAST:
74 	case SPDK_ACCEL_OPC_COMPARE:
75 	case SPDK_ACCEL_OPC_CRC32C:
76 	case SPDK_ACCEL_OPC_COPY_CRC32C:
77 	case SPDK_ACCEL_OPC_COMPRESS:
78 	case SPDK_ACCEL_OPC_DECOMPRESS:
79 	case SPDK_ACCEL_OPC_ENCRYPT:
80 	case SPDK_ACCEL_OPC_DECRYPT:
81 	case SPDK_ACCEL_OPC_XOR:
82 	case SPDK_ACCEL_OPC_DIF_VERIFY:
83 	case SPDK_ACCEL_OPC_DIF_GENERATE:
84 	case SPDK_ACCEL_OPC_DIF_GENERATE_COPY:
85 	case SPDK_ACCEL_OPC_DIF_VERIFY_COPY:
86 	case SPDK_ACCEL_OPC_DIX_GENERATE:
87 	case SPDK_ACCEL_OPC_DIX_VERIFY:
88 		return true;
89 	default:
90 		return false;
91 	}
92 }
93 
/* Copy a single-iovec source into two single-iovec destinations.
 * Only 1:1:1 iovec layouts with equal lengths are supported. */
static int
_sw_accel_dualcast_iovs(struct iovec *dst_iovs, uint32_t dst_iovcnt,
			struct iovec *dst2_iovs, uint32_t dst2_iovcnt,
			struct iovec *src_iovs, uint32_t src_iovcnt)
{
	size_t len;

	if (spdk_unlikely(src_iovcnt != 1 || dst_iovcnt != 1 || dst2_iovcnt != 1)) {
		return -EINVAL;
	}

	len = src_iovs[0].iov_len;
	if (spdk_unlikely(dst_iovs[0].iov_len != len || dst2_iovs[0].iov_len != len)) {
		return -EINVAL;
	}

	memcpy(dst_iovs[0].iov_base, src_iovs[0].iov_base, len);
	memcpy(dst2_iovs[0].iov_base, src_iovs[0].iov_base, len);

	return 0;
}
113 
114 static void
115 _sw_accel_copy_iovs(struct iovec *dst_iovs, uint32_t dst_iovcnt,
116 		    struct iovec *src_iovs, uint32_t src_iovcnt)
117 {
118 	struct spdk_ioviter iter;
119 	void *src, *dst;
120 	size_t len;
121 
122 	for (len = spdk_ioviter_first(&iter, src_iovs, src_iovcnt,
123 				      dst_iovs, dst_iovcnt, &src, &dst);
124 	     len != 0;
125 	     len = spdk_ioviter_next(&iter, &src, &dst)) {
126 		memcpy(dst, src, len);
127 	}
128 }
129 
/* Compare two single-iovec buffers of equal length.
 * Returns the raw memcmp() result (0 == equal), which doubles as the
 * task status, or -EINVAL for unsupported iovec layouts. */
static int
_sw_accel_compare(struct iovec *src_iovs, uint32_t src_iovcnt,
		  struct iovec *src2_iovs, uint32_t src2_iovcnt)
{
	size_t len;

	if (spdk_unlikely(src_iovcnt != 1 || src2_iovcnt != 1)) {
		return -EINVAL;
	}

	len = src_iovs[0].iov_len;
	if (spdk_unlikely(src2_iovs[0].iov_len != len)) {
		return -EINVAL;
	}

	return memcmp(src_iovs[0].iov_base, src2_iovs[0].iov_base, len);
}
144 
/* Fill a single-iovec destination with a one-byte pattern. */
static int
_sw_accel_fill(struct iovec *iovs, uint32_t iovcnt, uint8_t fill)
{
	if (spdk_unlikely(iovcnt != 1)) {
		return -EINVAL;
	}

	memset(iovs[0].iov_base, fill, iovs[0].iov_len);

	return 0;
}
162 
/*
 * Compute CRC-32C over the iovec array and store the result in *crc_dst.
 * NOTE(review): the seed is bitwise-inverted to match the initial-CRC
 * convention expected by spdk_crc32c_iov_update() — confirm against
 * spdk/crc32.h if the seed semantics ever change.
 */
static void
_sw_accel_crc32cv(uint32_t *crc_dst, struct iovec *iov, uint32_t iovcnt, uint32_t seed)
{
	*crc_dst = spdk_crc32c_iov_update(iov, iovcnt, ~seed);
}
168 
/*
 * Compress the task's source iovecs into its destination iovecs with ISA-L
 * deflate. Returns 0 on success, an isal_deflate() error code on compression
 * failure, or -ENOMEM when the destination space runs out before the stream
 * reaches its end state. On success the compressed size is reported through
 * accel_task->output_size (when non-NULL).
 */
static int
_sw_accel_compress(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
#ifdef SPDK_CONFIG_ISAL
	size_t last_seglen = accel_task->s.iovs[accel_task->s.iovcnt - 1].iov_len;
	struct iovec *siov = accel_task->s.iovs;
	struct iovec *diov = accel_task->d.iovs;
	size_t remaining;
	uint32_t i, s = 0, d = 0;
	int rc = 0;

	/* Total source bytes still to be consumed by isal_deflate(). */
	remaining = 0;
	for (i = 0; i < accel_task->s.iovcnt; ++i) {
		remaining += accel_task->s.iovs[i].iov_len;
	}

	isal_deflate_reset(&sw_ch->stream);
	sw_ch->stream.end_of_stream = 0;
	sw_ch->stream.next_out = diov[d].iov_base;
	sw_ch->stream.avail_out = diov[d].iov_len;
	sw_ch->stream.next_in = siov[s].iov_base;
	sw_ch->stream.avail_in = siov[s].iov_len;

	do {
		/* if isal has exhausted the current dst iovec, move to the next
		 * one if there is one */
		if (sw_ch->stream.avail_out == 0) {
			if (++d < accel_task->d.iovcnt) {
				sw_ch->stream.next_out = diov[d].iov_base;
				sw_ch->stream.avail_out = diov[d].iov_len;
				assert(sw_ch->stream.avail_out > 0);
			} else {
				/* we have no avail_out but also no more iovecs left so this is
				* the case where either the output buffer was a perfect fit
				* or not enough was provided.  Check the ISAL state to determine
				* which. */
				if (sw_ch->stream.internal_state.state != ZSTATE_END) {
					SPDK_ERRLOG("Not enough destination buffer provided.\n");
					rc = -ENOMEM;
				}
				break;
			}
		}

		/* if isal has exhausted the current src iovec, move to the next
		 * one if there is one */
		if (sw_ch->stream.avail_in == 0 && ((s + 1) < accel_task->s.iovcnt)) {
			s++;
			sw_ch->stream.next_in = siov[s].iov_base;
			sw_ch->stream.avail_in = siov[s].iov_len;
			assert(sw_ch->stream.avail_in > 0);
		}

		/* Once only the final source segment remains, tell ISA-L this is
		 * the end of the stream so it can emit the final block. */
		if (remaining <= last_seglen) {
			/* Need to set end of stream on last block */
			sw_ch->stream.end_of_stream = 1;
		}

		rc = isal_deflate(&sw_ch->stream);
		if (rc) {
			SPDK_ERRLOG("isal_deflate returned error %d.\n", rc);
		}

		/* Account for bytes consumed from the current source segment
		 * (avail_in counts down from the segment length). */
		if (remaining > 0) {
			assert(siov[s].iov_len > sw_ch->stream.avail_in);
			remaining -= (siov[s].iov_len - sw_ch->stream.avail_in);
		}

	} while (remaining > 0 || sw_ch->stream.avail_out == 0);
	assert(sw_ch->stream.avail_in  == 0);

	/* Get our total output size */
	if (accel_task->output_size != NULL) {
		assert(sw_ch->stream.total_out > 0);
		*accel_task->output_size = sw_ch->stream.total_out;
	}

	return rc;
#else
	SPDK_ERRLOG("ISAL option is required to use software compression.\n");
	return -EINVAL;
#endif
}
252 
/*
 * Decompress the task's source iovecs into its destination iovecs with ISA-L
 * inflate, looping until ISA-L reports the final block. On success the
 * decompressed size is reported through accel_task->output_size (when
 * non-NULL). Returns the last isal_inflate() status (0 on success).
 *
 * NOTE(review): unlike the compression path there is no explicit
 * out-of-destination-space exit; the loop only terminates on
 * ISAL_BLOCK_FINISH, so the caller must supply a sufficiently large
 * destination.
 */
static int
_sw_accel_decompress(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
#ifdef SPDK_CONFIG_ISAL
	struct iovec *siov = accel_task->s.iovs;
	struct iovec *diov = accel_task->d.iovs;
	uint32_t s = 0, d = 0;
	int rc = 0;

	isal_inflate_reset(&sw_ch->state);
	sw_ch->state.next_out = diov[d].iov_base;
	sw_ch->state.avail_out = diov[d].iov_len;
	sw_ch->state.next_in = siov[s].iov_base;
	sw_ch->state.avail_in = siov[s].iov_len;

	do {
		/* if isal has exhausted the current dst iovec, move to the next
		 * one if there is one */
		if (sw_ch->state.avail_out == 0 && ((d + 1) < accel_task->d.iovcnt)) {
			d++;
			sw_ch->state.next_out = diov[d].iov_base;
			sw_ch->state.avail_out = diov[d].iov_len;
			assert(sw_ch->state.avail_out > 0);
		}

		/* if isal has exhausted the current src iovec, move to the next
		 * one if there is one */
		if (sw_ch->state.avail_in == 0 && ((s + 1) < accel_task->s.iovcnt)) {
			s++;
			sw_ch->state.next_in = siov[s].iov_base;
			sw_ch->state.avail_in = siov[s].iov_len;
			assert(sw_ch->state.avail_in > 0);
		}

		rc = isal_inflate(&sw_ch->state);
		if (rc) {
			SPDK_ERRLOG("isal_inflate returned error %d.\n", rc);
		}

	} while (sw_ch->state.block_state < ISAL_BLOCK_FINISH);
	assert(sw_ch->state.avail_in == 0);

	/* Get our total output size */
	if (accel_task->output_size != NULL) {
		assert(sw_ch->state.total_out > 0);
		*accel_task->output_size = sw_ch->state.total_out;
	}

	return rc;
#else
	SPDK_ERRLOG("ISAL option is required to use software decompression.\n");
	return -EINVAL;
#endif
}
307 
/*
 * Run the AES-XTS callback `op` across the task's source/destination iovecs.
 * The 128-bit tweak (iv) carries accel_task->iv (a logical block address) in
 * its low 64 bits and is incremented once per completed logical block of
 * accel_task->block_size bytes, so every block gets its own tweak. Supports
 * in-place operation (no destination iovecs) and segments that are not
 * block-aligned, by processing the largest chunk bounded by the remainder of
 * the current block, the current source segment and the current destination
 * segment. Returns 0 on success, -ERANGE/-EINVAL on validation failure.
 */
static int
_sw_accel_crypto_operation(struct spdk_accel_task *accel_task, struct spdk_accel_crypto_key *key,
			   sw_accel_crypto_op op)
{
#ifdef SPDK_CONFIG_ISAL_CRYPTO
	uint64_t iv[2];
	size_t remaining_len, dst_len;
	uint64_t src_offset = 0, dst_offset = 0;
	uint32_t src_iovpos = 0, dst_iovpos = 0, src_iovcnt, dst_iovcnt;
	uint32_t i, block_size, crypto_len, crypto_accum_len = 0;
	struct iovec *src_iov, *dst_iov;
	uint8_t *src, *dst;
	int rc;

	/* iv is 128 bits, since we are using logical block address (64 bits) as iv, fill first 8 bytes with zeroes */
	iv[0] = 0;
	iv[1] = accel_task->iv;
	src_iov = accel_task->s.iovs;
	src_iovcnt = accel_task->s.iovcnt;
	if (accel_task->d.iovcnt) {
		dst_iov = accel_task->d.iovs;
		dst_iovcnt = accel_task->d.iovcnt;
	} else {
		/* inplace operation */
		dst_iov = accel_task->s.iovs;
		dst_iovcnt = accel_task->s.iovcnt;
	}
	block_size = accel_task->block_size;

	if (!src_iovcnt || !dst_iovcnt || !block_size || !op) {
		SPDK_ERRLOG("src_iovcnt %d, dst_iovcnt %d, block_size %d, op %p\n", src_iovcnt, dst_iovcnt,
			    block_size, op);
		return -EINVAL;
	}

	/* Source and destination must describe the same, non-zero number of
	 * bytes, and that total must be a whole number of logical blocks. */
	remaining_len = 0;
	for (i = 0; i < src_iovcnt; i++) {
		remaining_len += src_iov[i].iov_len;
	}
	dst_len = 0;
	for (i = 0; i < dst_iovcnt; i++) {
		dst_len += dst_iov[i].iov_len;
	}

	if (spdk_unlikely(remaining_len != dst_len || !remaining_len)) {
		return -ERANGE;
	}
	if (spdk_unlikely(remaining_len % accel_task->block_size != 0)) {
		return -EINVAL;
	}

	while (remaining_len) {
		crypto_len = spdk_min(block_size - crypto_accum_len, src_iov->iov_len - src_offset);
		crypto_len = spdk_min(crypto_len, dst_iov->iov_len - dst_offset);
		src = (uint8_t *)src_iov->iov_base + src_offset;
		dst = (uint8_t *)dst_iov->iov_base + dst_offset;

		/* NOTE(review): rc is not returned directly on failure; a mid-stream
		 * error leaves remaining_len non-zero and is reported as -EINVAL below. */
		rc = op((uint8_t *)key->key2, (uint8_t *)key->key, (uint8_t *)iv, crypto_len, src, dst);
		if (rc != ISAL_CRYPTO_ERR_NONE) {
			break;
		}

		src_offset += crypto_len;
		dst_offset += crypto_len;
		crypto_accum_len += crypto_len;
		remaining_len -= crypto_len;

		if (crypto_accum_len == block_size) {
			/* we can process part of logical block. Once the whole block is processed, increment iv */
			crypto_accum_len = 0;
			iv[1]++;
		}
		if (src_offset == src_iov->iov_len) {
			src_iov++;
			src_iovpos++;
			src_offset = 0;
		}
		if (src_iovpos == src_iovcnt) {
			break;
		}
		if (dst_offset == dst_iov->iov_len) {
			dst_iov++;
			dst_iovpos++;
			dst_offset = 0;
		}
		if (dst_iovpos == dst_iovcnt) {
			break;
		}
	}

	if (remaining_len) {
		SPDK_ERRLOG("remaining len %zu\n", remaining_len);
		return -EINVAL;
	}

	return 0;
#else
	return -ENOTSUP;
#endif
}
408 
409 static int
410 _sw_accel_encrypt(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
411 {
412 	struct spdk_accel_crypto_key *key;
413 	struct sw_accel_crypto_key_data *key_data;
414 
415 	key = accel_task->crypto_key;
416 	if (spdk_unlikely(key->module_if != &g_sw_module || !key->priv)) {
417 		return -EINVAL;
418 	}
419 	if (spdk_unlikely(accel_task->block_size > ACCEL_AES_XTS_MAX_BLOCK_SIZE)) {
420 		SPDK_WARNLOG("Max block size for AES_XTS is limited to %u, current size %u\n",
421 			     ACCEL_AES_XTS_MAX_BLOCK_SIZE, accel_task->block_size);
422 		return -ERANGE;
423 	}
424 	key_data = key->priv;
425 	return _sw_accel_crypto_operation(accel_task, key, key_data->encrypt);
426 }
427 
428 static int
429 _sw_accel_decrypt(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
430 {
431 	struct spdk_accel_crypto_key *key;
432 	struct sw_accel_crypto_key_data *key_data;
433 
434 	key = accel_task->crypto_key;
435 	if (spdk_unlikely(key->module_if != &g_sw_module || !key->priv)) {
436 		return -EINVAL;
437 	}
438 	if (spdk_unlikely(accel_task->block_size > ACCEL_AES_XTS_MAX_BLOCK_SIZE)) {
439 		SPDK_WARNLOG("Max block size for AES_XTS is limited to %u, current size %u\n",
440 			     ACCEL_AES_XTS_MAX_BLOCK_SIZE, accel_task->block_size);
441 		return -ERANGE;
442 	}
443 	key_data = key->priv;
444 	return _sw_accel_crypto_operation(accel_task, key, key_data->decrypt);
445 }
446 
447 static int
448 _sw_accel_xor(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
449 {
450 	return spdk_xor_gen(accel_task->d.iovs[0].iov_base,
451 			    accel_task->nsrcs.srcs,
452 			    accel_task->nsrcs.cnt,
453 			    accel_task->d.iovs[0].iov_len);
454 }
455 
456 static int
457 _sw_accel_dif_verify(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
458 {
459 	return spdk_dif_verify(accel_task->s.iovs,
460 			       accel_task->s.iovcnt,
461 			       accel_task->dif.num_blocks,
462 			       accel_task->dif.ctx,
463 			       accel_task->dif.err);
464 }
465 
466 static int
467 _sw_accel_dif_verify_copy(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
468 {
469 	return spdk_dif_verify_copy(accel_task->d.iovs,
470 				    accel_task->d.iovcnt,
471 				    accel_task->s.iovs,
472 				    accel_task->s.iovcnt,
473 				    accel_task->dif.num_blocks,
474 				    accel_task->dif.ctx,
475 				    accel_task->dif.err);
476 }
477 
478 static int
479 _sw_accel_dif_generate(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
480 {
481 	return spdk_dif_generate(accel_task->s.iovs,
482 				 accel_task->s.iovcnt,
483 				 accel_task->dif.num_blocks,
484 				 accel_task->dif.ctx);
485 }
486 
487 static int
488 _sw_accel_dif_generate_copy(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
489 {
490 	return spdk_dif_generate_copy(accel_task->s.iovs,
491 				      accel_task->s.iovcnt,
492 				      accel_task->d.iovs,
493 				      accel_task->d.iovcnt,
494 				      accel_task->dif.num_blocks,
495 				      accel_task->dif.ctx);
496 }
497 
498 static int
499 _sw_accel_dix_generate(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
500 {
501 	return spdk_dix_generate(accel_task->s.iovs,
502 				 accel_task->s.iovcnt,
503 				 accel_task->d.iovs,
504 				 accel_task->dif.num_blocks,
505 				 accel_task->dif.ctx);
506 }
507 
508 static int
509 _sw_accel_dix_verify(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
510 {
511 	return spdk_dix_verify(accel_task->s.iovs,
512 			       accel_task->s.iovcnt,
513 			       accel_task->d.iovs,
514 			       accel_task->dif.num_blocks,
515 			       accel_task->dif.ctx,
516 			       accel_task->dif.err);
517 }
518 
519 static int
520 accel_comp_poll(void *arg)
521 {
522 	struct sw_accel_io_channel	*sw_ch = arg;
523 	STAILQ_HEAD(, spdk_accel_task)	tasks_to_complete;
524 	struct spdk_accel_task		*accel_task;
525 
526 	if (STAILQ_EMPTY(&sw_ch->tasks_to_complete)) {
527 		return SPDK_POLLER_IDLE;
528 	}
529 
530 	STAILQ_INIT(&tasks_to_complete);
531 	STAILQ_SWAP(&tasks_to_complete, &sw_ch->tasks_to_complete, spdk_accel_task);
532 
533 	while ((accel_task = STAILQ_FIRST(&tasks_to_complete))) {
534 		STAILQ_REMOVE_HEAD(&tasks_to_complete, link);
535 		spdk_accel_task_complete(accel_task, accel_task->status);
536 	}
537 
538 	return SPDK_POLLER_BUSY;
539 }
540 
541 static int
542 sw_accel_submit_tasks(struct spdk_io_channel *ch, struct spdk_accel_task *accel_task)
543 {
544 	struct sw_accel_io_channel *sw_ch = spdk_io_channel_get_ctx(ch);
545 	struct spdk_accel_task *tmp;
546 	int rc = 0;
547 
548 	/*
549 	 * Lazily initialize our completion poller. We don't want to complete
550 	 * them inline as they'll likely submit another.
551 	 */
552 	if (spdk_unlikely(sw_ch->completion_poller == NULL)) {
553 		sw_ch->completion_poller = SPDK_POLLER_REGISTER(accel_comp_poll, sw_ch, 0);
554 	}
555 
556 	do {
557 		switch (accel_task->op_code) {
558 		case SPDK_ACCEL_OPC_COPY:
559 			_sw_accel_copy_iovs(accel_task->d.iovs, accel_task->d.iovcnt,
560 					    accel_task->s.iovs, accel_task->s.iovcnt);
561 			break;
562 		case SPDK_ACCEL_OPC_FILL:
563 			rc = _sw_accel_fill(accel_task->d.iovs, accel_task->d.iovcnt,
564 					    accel_task->fill_pattern);
565 			break;
566 		case SPDK_ACCEL_OPC_DUALCAST:
567 			rc = _sw_accel_dualcast_iovs(accel_task->d.iovs, accel_task->d.iovcnt,
568 						     accel_task->d2.iovs, accel_task->d2.iovcnt,
569 						     accel_task->s.iovs, accel_task->s.iovcnt);
570 			break;
571 		case SPDK_ACCEL_OPC_COMPARE:
572 			rc = _sw_accel_compare(accel_task->s.iovs, accel_task->s.iovcnt,
573 					       accel_task->s2.iovs, accel_task->s2.iovcnt);
574 			break;
575 		case SPDK_ACCEL_OPC_CRC32C:
576 			_sw_accel_crc32cv(accel_task->crc_dst, accel_task->s.iovs, accel_task->s.iovcnt, accel_task->seed);
577 			break;
578 		case SPDK_ACCEL_OPC_COPY_CRC32C:
579 			_sw_accel_copy_iovs(accel_task->d.iovs, accel_task->d.iovcnt,
580 					    accel_task->s.iovs, accel_task->s.iovcnt);
581 			_sw_accel_crc32cv(accel_task->crc_dst, accel_task->s.iovs,
582 					  accel_task->s.iovcnt, accel_task->seed);
583 			break;
584 		case SPDK_ACCEL_OPC_COMPRESS:
585 			rc = _sw_accel_compress(sw_ch, accel_task);
586 			break;
587 		case SPDK_ACCEL_OPC_DECOMPRESS:
588 			rc = _sw_accel_decompress(sw_ch, accel_task);
589 			break;
590 		case SPDK_ACCEL_OPC_XOR:
591 			rc = _sw_accel_xor(sw_ch, accel_task);
592 			break;
593 		case SPDK_ACCEL_OPC_ENCRYPT:
594 			rc = _sw_accel_encrypt(sw_ch, accel_task);
595 			break;
596 		case SPDK_ACCEL_OPC_DECRYPT:
597 			rc = _sw_accel_decrypt(sw_ch, accel_task);
598 			break;
599 		case SPDK_ACCEL_OPC_DIF_VERIFY:
600 			rc = _sw_accel_dif_verify(sw_ch, accel_task);
601 			break;
602 		case SPDK_ACCEL_OPC_DIF_VERIFY_COPY:
603 			rc = _sw_accel_dif_verify_copy(sw_ch, accel_task);
604 			break;
605 		case SPDK_ACCEL_OPC_DIF_GENERATE:
606 			rc = _sw_accel_dif_generate(sw_ch, accel_task);
607 			break;
608 		case SPDK_ACCEL_OPC_DIF_GENERATE_COPY:
609 			rc = _sw_accel_dif_generate_copy(sw_ch, accel_task);
610 			break;
611 		case SPDK_ACCEL_OPC_DIX_GENERATE:
612 			rc = _sw_accel_dix_generate(sw_ch, accel_task);
613 			break;
614 		case SPDK_ACCEL_OPC_DIX_VERIFY:
615 			rc = _sw_accel_dix_verify(sw_ch, accel_task);
616 			break;
617 		default:
618 			assert(false);
619 			break;
620 		}
621 
622 		tmp = STAILQ_NEXT(accel_task, link);
623 
624 		_add_to_comp_list(sw_ch, accel_task, rc);
625 
626 		accel_task = tmp;
627 	} while (accel_task);
628 
629 	return 0;
630 }
631 
632 static int
633 sw_accel_create_cb(void *io_device, void *ctx_buf)
634 {
635 	struct sw_accel_io_channel *sw_ch = ctx_buf;
636 
637 	STAILQ_INIT(&sw_ch->tasks_to_complete);
638 	sw_ch->completion_poller = NULL;
639 
640 #ifdef SPDK_CONFIG_ISAL
641 	isal_deflate_init(&sw_ch->stream);
642 	sw_ch->stream.flush = NO_FLUSH;
643 	sw_ch->stream.level = 1;
644 	sw_ch->stream.level_buf = calloc(1, ISAL_DEF_LVL1_DEFAULT);
645 	if (sw_ch->stream.level_buf == NULL) {
646 		SPDK_ERRLOG("Could not allocate isal internal buffer\n");
647 		return -ENOMEM;
648 	}
649 	sw_ch->stream.level_buf_size = ISAL_DEF_LVL1_DEFAULT;
650 	isal_inflate_init(&sw_ch->state);
651 #endif
652 
653 	return 0;
654 }
655 
656 static void
657 sw_accel_destroy_cb(void *io_device, void *ctx_buf)
658 {
659 	struct sw_accel_io_channel *sw_ch = ctx_buf;
660 
661 #ifdef SPDK_CONFIG_ISAL
662 	free(sw_ch->stream.level_buf);
663 #endif
664 
665 	spdk_poller_unregister(&sw_ch->completion_poller);
666 }
667 
/* Return an I/O channel for this module; g_sw_module doubles as the
 * io_device handle registered in sw_accel_module_init(). */
static struct spdk_io_channel *
sw_accel_get_io_channel(void)
{
	return spdk_get_io_channel(&g_sw_module);
}
673 
/* Per-task context size the accel framework must allocate for this module. */
static size_t
sw_accel_module_get_ctx_size(void)
{
	return sizeof(struct spdk_accel_task);
}
679 
/* Module init: register g_sw_module as an io_device so channels carry
 * per-channel sw_accel_io_channel state. */
static int
sw_accel_module_init(void)
{
	spdk_io_device_register(&g_sw_module, sw_accel_create_cb, sw_accel_destroy_cb,
				sizeof(struct sw_accel_io_channel), "sw_accel_module");

	return 0;
}
688 
/* Module teardown: unregister the io_device and signal the accel framework
 * that this module has finished. */
static void
sw_accel_module_fini(void *ctxt)
{
	spdk_io_device_unregister(&g_sw_module, NULL);
	spdk_accel_module_finish();
}
695 
/* Allocate per-key data and select the ISA-L AES-XTS callbacks matching the
 * key size. Stores the result in key->priv; freed by
 * sw_accel_crypto_key_deinit(). */
static int
sw_accel_create_aes_xts(struct spdk_accel_crypto_key *key)
{
#ifdef SPDK_CONFIG_ISAL_CRYPTO
	struct sw_accel_crypto_key_data *kd;

	kd = calloc(1, sizeof(*kd));
	if (kd == NULL) {
		return -ENOMEM;
	}

	if (key->key_size == SPDK_ACCEL_AES_XTS_128_KEY_SIZE) {
		kd->encrypt = isal_aes_xts_enc_128;
		kd->decrypt = isal_aes_xts_dec_128;
	} else if (key->key_size == SPDK_ACCEL_AES_XTS_256_KEY_SIZE) {
		kd->encrypt = isal_aes_xts_enc_256;
		kd->decrypt = isal_aes_xts_dec_256;
	} else {
		/* Key sizes are validated before keys reach this module. */
		assert(0);
		free(kd);
		return -EINVAL;
	}

	key->priv = kd;

	return 0;
#else
	return -ENOTSUP;
#endif
}
729 
/* Create module-private key data; AES-XTS is the only supported cipher. */
static int
sw_accel_crypto_key_init(struct spdk_accel_crypto_key *key)
{
	return sw_accel_create_aes_xts(key);
}
735 
736 static void
737 sw_accel_crypto_key_deinit(struct spdk_accel_crypto_key *key)
738 {
739 	if (!key || key->module_if != &g_sw_module || !key->priv) {
740 		return;
741 	}
742 
743 	free(key->priv);
744 }
745 
746 static bool
747 sw_accel_crypto_supports_tweak_mode(enum spdk_accel_crypto_tweak_mode tweak_mode)
748 {
749 	return tweak_mode == SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA;
750 }
751 
752 static bool
753 sw_accel_crypto_supports_cipher(enum spdk_accel_cipher cipher, size_t key_size)
754 {
755 	switch (cipher) {
756 	case SPDK_ACCEL_CIPHER_AES_XTS:
757 		return key_size == SPDK_ACCEL_AES_XTS_128_KEY_SIZE || key_size == SPDK_ACCEL_AES_XTS_256_KEY_SIZE;
758 	default:
759 		return false;
760 	}
761 }
762 
/*
 * Report per-opcode execution constraints. The software path imposes no
 * buffer alignment requirement, so every opcode reports alignment 0; the
 * opcode and ctx parameters are part of the interface but unused here.
 */
static int
sw_accel_get_operation_info(enum spdk_accel_opcode opcode,
			    const struct spdk_accel_operation_exec_ctx *ctx,
			    struct spdk_accel_opcode_info *info)
{
	info->required_alignment = 0;

	return 0;
}
772 
/* Accel module interface for the built-in software implementation,
 * registered below under the name "software". */
static struct spdk_accel_module_if g_sw_module = {
	.module_init			= sw_accel_module_init,
	.module_fini			= sw_accel_module_fini,
	.write_config_json		= NULL,
	.get_ctx_size			= sw_accel_module_get_ctx_size,
	.name				= "software",
	.priority			= SPDK_ACCEL_SW_PRIORITY,
	.supports_opcode		= sw_accel_supports_opcode,
	.get_io_channel			= sw_accel_get_io_channel,
	.submit_tasks			= sw_accel_submit_tasks,
	.crypto_key_init		= sw_accel_crypto_key_init,
	.crypto_key_deinit		= sw_accel_crypto_key_deinit,
	.crypto_supports_tweak_mode	= sw_accel_crypto_supports_tweak_mode,
	.crypto_supports_cipher		= sw_accel_crypto_supports_cipher,
	.get_operation_info		= sw_accel_get_operation_info,
};
789 
790 SPDK_ACCEL_MODULE_REGISTER(sw, &g_sw_module)
791