/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2022 Intel Corporation.
 *   Copyright (c) 2022, 2023 NVIDIA CORPORATION & AFFILIATES
 *   All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/accel_module.h"
#include "accel_internal.h"

#include "spdk/env.h"
#include "spdk/likely.h"
#include "spdk/log.h"
#include "spdk/thread.h"
#include "spdk/json.h"
#include "spdk/crc32.h"
#include "spdk/util.h"
#include "spdk/xor.h"
#include "spdk/dif.h"

#ifdef SPDK_CONFIG_ISAL
#include "../isa-l/include/igzip_lib.h"
#ifdef SPDK_CONFIG_ISAL_CRYPTO
#include "../isa-l-crypto/include/aes_xts.h"
#endif
#endif

/* Per the AES-XTS spec, a data unit may not exceed 2^20 blocks of 128 bits each. */
#define ACCEL_AES_XTS_MAX_BLOCK_SIZE (1 << 24)
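/* That works out to 2^20 blocks * 16 bytes per 128-bit block = 2^24 bytes (16 MiB). */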

struct sw_accel_io_channel {
	/* for ISAL */
#ifdef SPDK_CONFIG_ISAL
	struct isal_zstream		stream;
	struct inflate_state		state;
#endif
	struct spdk_poller		*completion_poller;
	STAILQ_HEAD(, spdk_accel_task)	tasks_to_complete;
};

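/* Matches the ISA-L XTS_AES_{128,256}_{enc,dec} prototypes: tweak key (k2),
 * data key (k1), initial tweak, length in bytes, source and destination. */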
typedef void (*sw_accel_crypto_op)(uint8_t *k2, uint8_t *k1, uint8_t *tweak, uint64_t lba_size,
				   const uint8_t *src, uint8_t *dst);

struct sw_accel_crypto_key_data {
	sw_accel_crypto_op encrypt;
	sw_accel_crypto_op decrypt;
};

static struct spdk_accel_module_if g_sw_module;

static void sw_accel_crypto_key_deinit(struct spdk_accel_crypto_key *_key);
static int sw_accel_crypto_key_init(struct spdk_accel_crypto_key *key);
static bool sw_accel_crypto_supports_tweak_mode(enum spdk_accel_crypto_tweak_mode tweak_mode);
static bool sw_accel_crypto_supports_cipher(enum spdk_accel_cipher cipher, size_t key_size);

/* Post SW completions to a list; processed by ->completion_poller. */
inline static void
_add_to_comp_list(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task, int status)
{
	accel_task->status = status;
	STAILQ_INSERT_TAIL(&sw_ch->tasks_to_complete, accel_task, link);
}

static bool
sw_accel_supports_opcode(enum spdk_accel_opcode opc)
{
	switch (opc) {
	case SPDK_ACCEL_OPC_COPY:
	case SPDK_ACCEL_OPC_FILL:
	case SPDK_ACCEL_OPC_DUALCAST:
	case SPDK_ACCEL_OPC_COMPARE:
	case SPDK_ACCEL_OPC_CRC32C:
	case SPDK_ACCEL_OPC_COPY_CRC32C:
	case SPDK_ACCEL_OPC_COMPRESS:
	case SPDK_ACCEL_OPC_DECOMPRESS:
	case SPDK_ACCEL_OPC_ENCRYPT:
	case SPDK_ACCEL_OPC_DECRYPT:
	case SPDK_ACCEL_OPC_XOR:
	case SPDK_ACCEL_OPC_DIF_VERIFY:
	case SPDK_ACCEL_OPC_DIF_GENERATE:
	case SPDK_ACCEL_OPC_DIF_GENERATE_COPY:
		return true;
	default:
		return false;
	}
}

static int
_sw_accel_dualcast_iovs(struct iovec *dst_iovs, uint32_t dst_iovcnt,
			struct iovec *dst2_iovs, uint32_t dst2_iovcnt,
			struct iovec *src_iovs, uint32_t src_iovcnt)
{
	if (spdk_unlikely(dst_iovcnt != 1 || dst2_iovcnt != 1 || src_iovcnt != 1)) {
		return -EINVAL;
	}

	if (spdk_unlikely(dst_iovs[0].iov_len != src_iovs[0].iov_len ||
			  dst_iovs[0].iov_len != dst2_iovs[0].iov_len)) {
		return -EINVAL;
	}

	memcpy(dst_iovs[0].iov_base, src_iovs[0].iov_base, dst_iovs[0].iov_len);
	memcpy(dst2_iovs[0].iov_base, src_iovs[0].iov_base, dst_iovs[0].iov_len);

	return 0;
}

static void
_sw_accel_copy_iovs(struct iovec *dst_iovs, uint32_t dst_iovcnt,
		    struct iovec *src_iovs, uint32_t src_iovcnt)
{
	struct spdk_ioviter iter;
	void *src, *dst;
	size_t len;

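	/* spdk_ioviter walks the source and destination scatter-gather lists in
	 * lockstep, yielding the largest chunk that is contiguous in both. */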
	for (len = spdk_ioviter_first(&iter, src_iovs, src_iovcnt,
				      dst_iovs, dst_iovcnt, &src, &dst);
	     len != 0;
	     len = spdk_ioviter_next(&iter, &src, &dst)) {
		memcpy(dst, src, len);
	}
}

static int
_sw_accel_compare(struct iovec *src_iovs, uint32_t src_iovcnt,
		  struct iovec *src2_iovs, uint32_t src2_iovcnt)
{
	if (spdk_unlikely(src_iovcnt != 1 || src2_iovcnt != 1)) {
		return -EINVAL;
	}

	if (spdk_unlikely(src_iovs[0].iov_len != src2_iovs[0].iov_len)) {
		return -EINVAL;
	}

	return memcmp(src_iovs[0].iov_base, src2_iovs[0].iov_base, src_iovs[0].iov_len);
}

static int
_sw_accel_fill(struct iovec *iovs, uint32_t iovcnt, uint8_t fill)
{
	void *dst;
	size_t nbytes;

	if (spdk_unlikely(iovcnt != 1)) {
		return -EINVAL;
	}

	dst = iovs[0].iov_base;
	nbytes = iovs[0].iov_len;

	memset(dst, fill, nbytes);

	return 0;
}

static void
_sw_accel_crc32cv(uint32_t *crc_dst, struct iovec *iov, uint32_t iovcnt, uint32_t seed)
{
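	/* The seed is bit-inverted here to match the running-CRC form that
	 * spdk_crc32c_iov_update expects. */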
	*crc_dst = spdk_crc32c_iov_update(iov, iovcnt, ~seed);
}

static int
_sw_accel_compress(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
#ifdef SPDK_CONFIG_ISAL
	size_t last_seglen = accel_task->s.iovs[accel_task->s.iovcnt - 1].iov_len;
	struct iovec *siov = accel_task->s.iovs;
	struct iovec *diov = accel_task->d.iovs;
	size_t remaining;
	uint32_t i, s = 0, d = 0;
	int rc = 0;

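	/* Sum the total input length; 'remaining' tracks how much input is left
	 * so the final segment can be flagged with end_of_stream. */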
	remaining = 0;
	for (i = 0; i < accel_task->s.iovcnt; ++i) {
		remaining += accel_task->s.iovs[i].iov_len;
	}

	isal_deflate_reset(&sw_ch->stream);
	sw_ch->stream.end_of_stream = 0;
	sw_ch->stream.next_out = diov[d].iov_base;
	sw_ch->stream.avail_out = diov[d].iov_len;
	sw_ch->stream.next_in = siov[s].iov_base;
	sw_ch->stream.avail_in = siov[s].iov_len;

	do {
		/* If ISA-L has exhausted the current dst iovec, move to the next
		 * one if there is one. */
		if (sw_ch->stream.avail_out == 0) {
			if (++d < accel_task->d.iovcnt) {
				sw_ch->stream.next_out = diov[d].iov_base;
				sw_ch->stream.avail_out = diov[d].iov_len;
				assert(sw_ch->stream.avail_out > 0);
			} else {
				/* We have no avail_out and no more iovecs, so either
				 * the output buffer was a perfect fit or not enough
				 * was provided. Check the ISA-L state to determine
				 * which. */
				if (sw_ch->stream.internal_state.state != ZSTATE_END) {
					SPDK_ERRLOG("Not enough destination buffer provided.\n");
					rc = -ENOMEM;
				}
				break;
			}
		}

		/* If ISA-L has exhausted the current src iovec, move to the next
		 * one if there is one. */
		if (sw_ch->stream.avail_in == 0 && ((s + 1) < accel_task->s.iovcnt)) {
			s++;
			sw_ch->stream.next_in = siov[s].iov_base;
			sw_ch->stream.avail_in = siov[s].iov_len;
			assert(sw_ch->stream.avail_in > 0);
		}

		if (remaining <= last_seglen) {
			/* Need to set end of stream on the last block. */
			sw_ch->stream.end_of_stream = 1;
		}

		rc = isal_deflate(&sw_ch->stream);
		if (rc) {
			SPDK_ERRLOG("isal_deflate returned error %d.\n", rc);
		}

		if (remaining > 0) {
			assert(siov[s].iov_len > sw_ch->stream.avail_in);
			remaining -= (siov[s].iov_len - sw_ch->stream.avail_in);
		}

	} while (remaining > 0 || sw_ch->stream.avail_out == 0);
	assert(sw_ch->stream.avail_in == 0);

	/* Get our total output size */
	if (accel_task->output_size != NULL) {
		assert(sw_ch->stream.total_out > 0);
		*accel_task->output_size = sw_ch->stream.total_out;
	}

	return rc;
#else
	SPDK_ERRLOG("ISAL option is required to use software compression.\n");
	return -EINVAL;
#endif
}

static int
_sw_accel_decompress(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
#ifdef SPDK_CONFIG_ISAL
	struct iovec *siov = accel_task->s.iovs;
	struct iovec *diov = accel_task->d.iovs;
	uint32_t s = 0, d = 0;
	int rc = 0;

	isal_inflate_reset(&sw_ch->state);
	sw_ch->state.next_out = diov[d].iov_base;
	sw_ch->state.avail_out = diov[d].iov_len;
	sw_ch->state.next_in = siov[s].iov_base;
	sw_ch->state.avail_in = siov[s].iov_len;

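	/* Run isal_inflate until it reports the final block, refilling the src
	 * and dst pointers from the iovecs as either side drains. */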
	do {
		/* If ISA-L has exhausted the current dst iovec, move to the next
		 * one if there is one. */
		if (sw_ch->state.avail_out == 0 && ((d + 1) < accel_task->d.iovcnt)) {
			d++;
			sw_ch->state.next_out = diov[d].iov_base;
			sw_ch->state.avail_out = diov[d].iov_len;
			assert(sw_ch->state.avail_out > 0);
		}

		/* If ISA-L has exhausted the current src iovec, move to the next
		 * one if there is one. */
		if (sw_ch->state.avail_in == 0 && ((s + 1) < accel_task->s.iovcnt)) {
			s++;
			sw_ch->state.next_in = siov[s].iov_base;
			sw_ch->state.avail_in = siov[s].iov_len;
			assert(sw_ch->state.avail_in > 0);
		}

		rc = isal_inflate(&sw_ch->state);
		if (rc) {
			SPDK_ERRLOG("isal_inflate returned error %d.\n", rc);
		}

	} while (sw_ch->state.block_state < ISAL_BLOCK_FINISH);
	assert(sw_ch->state.avail_in == 0);

	/* Get our total output size */
	if (accel_task->output_size != NULL) {
		assert(sw_ch->state.total_out > 0);
		*accel_task->output_size = sw_ch->state.total_out;
	}

	return rc;
#else
	SPDK_ERRLOG("ISAL option is required to use software decompression.\n");
	return -EINVAL;
#endif
}

static int
_sw_accel_crypto_operation(struct spdk_accel_task *accel_task, struct spdk_accel_crypto_key *key,
			   sw_accel_crypto_op op)
{
#ifdef SPDK_CONFIG_ISAL_CRYPTO
	uint64_t iv[2];
	size_t remaining_len, dst_len;
	uint64_t src_offset = 0, dst_offset = 0;
	uint32_t src_iovpos = 0, dst_iovpos = 0, src_iovcnt, dst_iovcnt;
	uint32_t i, block_size, crypto_len, crypto_accum_len = 0;
	struct iovec *src_iov, *dst_iov;
	uint8_t *src, *dst;

	/* The iv is 128 bits; since we use the logical block address (64 bits)
	 * as the iv, fill the first 8 bytes with zeroes. */
	iv[0] = 0;
	iv[1] = accel_task->iv;
	src_iov = accel_task->s.iovs;
	src_iovcnt = accel_task->s.iovcnt;
	if (accel_task->d.iovcnt) {
		dst_iov = accel_task->d.iovs;
		dst_iovcnt = accel_task->d.iovcnt;
	} else {
		/* In-place operation */
		dst_iov = accel_task->s.iovs;
		dst_iovcnt = accel_task->s.iovcnt;
	}
	block_size = accel_task->block_size;

	if (!src_iovcnt || !dst_iovcnt || !block_size || !op) {
		SPDK_ERRLOG("src_iovcnt %u, dst_iovcnt %u, block_size %u, op %p\n", src_iovcnt, dst_iovcnt,
			    block_size, op);
		return -EINVAL;
	}

	remaining_len = 0;
	for (i = 0; i < src_iovcnt; i++) {
		remaining_len += src_iov[i].iov_len;
	}
	dst_len = 0;
	for (i = 0; i < dst_iovcnt; i++) {
		dst_len += dst_iov[i].iov_len;
	}

	if (spdk_unlikely(remaining_len != dst_len || !remaining_len)) {
		return -ERANGE;
	}
	if (spdk_unlikely(remaining_len % accel_task->block_size != 0)) {
		return -EINVAL;
	}

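	/* Each iteration processes at most one logical block: crypto_len is
	 * capped by the bytes left in the current data unit and by the bytes
	 * left in the current src and dst iovecs. */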
	while (remaining_len) {
		crypto_len = spdk_min(block_size - crypto_accum_len, src_iov->iov_len - src_offset);
		crypto_len = spdk_min(crypto_len, dst_iov->iov_len - dst_offset);
		src = (uint8_t *)src_iov->iov_base + src_offset;
		dst = (uint8_t *)dst_iov->iov_base + dst_offset;

		op((uint8_t *)key->key2, (uint8_t *)key->key, (uint8_t *)iv, crypto_len, src, dst);

		src_offset += crypto_len;
		dst_offset += crypto_len;
		crypto_accum_len += crypto_len;
		remaining_len -= crypto_len;

		if (crypto_accum_len == block_size) {
			/* We may process only part of a logical block at a time; once
			 * the whole block has been processed, increment the iv. */
			crypto_accum_len = 0;
			iv[1]++;
		}
		if (src_offset == src_iov->iov_len) {
			src_iov++;
			src_iovpos++;
			src_offset = 0;
		}
		if (src_iovpos == src_iovcnt) {
			break;
		}
		if (dst_offset == dst_iov->iov_len) {
			dst_iov++;
			dst_iovpos++;
			dst_offset = 0;
		}
		if (dst_iovpos == dst_iovcnt) {
			break;
		}
	}

	if (remaining_len) {
		SPDK_ERRLOG("remaining len %zu\n", remaining_len);
		return -EINVAL;
	}

	return 0;
#else
	return -ENOTSUP;
#endif
}

static int
_sw_accel_encrypt(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	struct spdk_accel_crypto_key *key;
	struct sw_accel_crypto_key_data *key_data;

	key = accel_task->crypto_key;
	if (spdk_unlikely(key->module_if != &g_sw_module || !key->priv)) {
		return -EINVAL;
	}
	if (spdk_unlikely(accel_task->block_size > ACCEL_AES_XTS_MAX_BLOCK_SIZE)) {
		SPDK_WARNLOG("Max block size for AES_XTS is limited to %u, current size %u\n",
			     ACCEL_AES_XTS_MAX_BLOCK_SIZE, accel_task->block_size);
		return -ERANGE;
	}
	key_data = key->priv;
	return _sw_accel_crypto_operation(accel_task, key, key_data->encrypt);
}

static int
_sw_accel_decrypt(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	struct spdk_accel_crypto_key *key;
	struct sw_accel_crypto_key_data *key_data;

	key = accel_task->crypto_key;
	if (spdk_unlikely(key->module_if != &g_sw_module || !key->priv)) {
		return -EINVAL;
	}
	if (spdk_unlikely(accel_task->block_size > ACCEL_AES_XTS_MAX_BLOCK_SIZE)) {
		SPDK_WARNLOG("Max block size for AES_XTS is limited to %u, current size %u\n",
			     ACCEL_AES_XTS_MAX_BLOCK_SIZE, accel_task->block_size);
		return -ERANGE;
	}
	key_data = key->priv;
	return _sw_accel_crypto_operation(accel_task, key, key_data->decrypt);
}

static int
_sw_accel_xor(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	return spdk_xor_gen(accel_task->d.iovs[0].iov_base,
			    accel_task->nsrcs.srcs,
			    accel_task->nsrcs.cnt,
			    accel_task->d.iovs[0].iov_len);
}

static int
_sw_accel_dif_verify(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	return spdk_dif_verify(accel_task->s.iovs,
			       accel_task->s.iovcnt,
			       accel_task->dif.num_blocks,
			       accel_task->dif.ctx,
			       accel_task->dif.err);
}

static int
_sw_accel_dif_generate(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	return spdk_dif_generate(accel_task->s.iovs,
				 accel_task->s.iovcnt,
				 accel_task->dif.num_blocks,
				 accel_task->dif.ctx);
}

static int
_sw_accel_dif_generate_copy(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	return spdk_dif_generate_copy(accel_task->s.iovs,
				      accel_task->s.iovcnt,
				      accel_task->d.iovs,
				      accel_task->d.iovcnt,
				      accel_task->dif.num_blocks,
				      accel_task->dif.ctx);
}

static int
accel_comp_poll(void *arg)
{
	struct sw_accel_io_channel	*sw_ch = arg;
	STAILQ_HEAD(, spdk_accel_task)	tasks_to_complete;
	struct spdk_accel_task		*accel_task;

	if (STAILQ_EMPTY(&sw_ch->tasks_to_complete)) {
		return SPDK_POLLER_IDLE;
	}

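	/* Move the pending completions onto a local list first, so completion
	 * callbacks can safely queue new tasks on the channel. */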
	STAILQ_INIT(&tasks_to_complete);
	STAILQ_SWAP(&tasks_to_complete, &sw_ch->tasks_to_complete, spdk_accel_task);

	while ((accel_task = STAILQ_FIRST(&tasks_to_complete))) {
		STAILQ_REMOVE_HEAD(&tasks_to_complete, link);
		spdk_accel_task_complete(accel_task, accel_task->status);
	}

	return SPDK_POLLER_BUSY;
}

static int
sw_accel_submit_tasks(struct spdk_io_channel *ch, struct spdk_accel_task *accel_task)
{
	struct sw_accel_io_channel *sw_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *tmp;
	int rc = 0;

	/*
	 * Lazily register our completion poller. We don't want to complete
	 * tasks inline, since their completion callbacks will likely submit
	 * more tasks.
	 */
	if (spdk_unlikely(sw_ch->completion_poller == NULL)) {
		sw_ch->completion_poller = SPDK_POLLER_REGISTER(accel_comp_poll, sw_ch, 0);
	}

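	/* Tasks may arrive as a pre-linked chain; execute each one synchronously
	 * and queue its completion for the poller. */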
	do {
		switch (accel_task->op_code) {
		case SPDK_ACCEL_OPC_COPY:
			_sw_accel_copy_iovs(accel_task->d.iovs, accel_task->d.iovcnt,
					    accel_task->s.iovs, accel_task->s.iovcnt);
			break;
		case SPDK_ACCEL_OPC_FILL:
			rc = _sw_accel_fill(accel_task->d.iovs, accel_task->d.iovcnt,
					    accel_task->fill_pattern);
			break;
		case SPDK_ACCEL_OPC_DUALCAST:
			rc = _sw_accel_dualcast_iovs(accel_task->d.iovs, accel_task->d.iovcnt,
						     accel_task->d2.iovs, accel_task->d2.iovcnt,
						     accel_task->s.iovs, accel_task->s.iovcnt);
			break;
		case SPDK_ACCEL_OPC_COMPARE:
			rc = _sw_accel_compare(accel_task->s.iovs, accel_task->s.iovcnt,
					       accel_task->s2.iovs, accel_task->s2.iovcnt);
			break;
		case SPDK_ACCEL_OPC_CRC32C:
			_sw_accel_crc32cv(accel_task->crc_dst, accel_task->s.iovs,
					  accel_task->s.iovcnt, accel_task->seed);
			break;
		case SPDK_ACCEL_OPC_COPY_CRC32C:
			_sw_accel_copy_iovs(accel_task->d.iovs, accel_task->d.iovcnt,
					    accel_task->s.iovs, accel_task->s.iovcnt);
			_sw_accel_crc32cv(accel_task->crc_dst, accel_task->s.iovs,
					  accel_task->s.iovcnt, accel_task->seed);
			break;
		case SPDK_ACCEL_OPC_COMPRESS:
			rc = _sw_accel_compress(sw_ch, accel_task);
			break;
		case SPDK_ACCEL_OPC_DECOMPRESS:
			rc = _sw_accel_decompress(sw_ch, accel_task);
			break;
		case SPDK_ACCEL_OPC_XOR:
			rc = _sw_accel_xor(sw_ch, accel_task);
			break;
		case SPDK_ACCEL_OPC_ENCRYPT:
			rc = _sw_accel_encrypt(sw_ch, accel_task);
			break;
		case SPDK_ACCEL_OPC_DECRYPT:
			rc = _sw_accel_decrypt(sw_ch, accel_task);
			break;
		case SPDK_ACCEL_OPC_DIF_VERIFY:
			rc = _sw_accel_dif_verify(sw_ch, accel_task);
			break;
		case SPDK_ACCEL_OPC_DIF_GENERATE:
			rc = _sw_accel_dif_generate(sw_ch, accel_task);
			break;
		case SPDK_ACCEL_OPC_DIF_GENERATE_COPY:
			rc = _sw_accel_dif_generate_copy(sw_ch, accel_task);
			break;
		default:
			assert(false);
			break;
		}

		tmp = STAILQ_NEXT(accel_task, link);

		_add_to_comp_list(sw_ch, accel_task, rc);

		accel_task = tmp;
	} while (accel_task);

	return 0;
}

static int
sw_accel_create_cb(void *io_device, void *ctx_buf)
{
	struct sw_accel_io_channel *sw_ch = ctx_buf;

	STAILQ_INIT(&sw_ch->tasks_to_complete);
	sw_ch->completion_poller = NULL;

#ifdef SPDK_CONFIG_ISAL
	isal_deflate_init(&sw_ch->stream);
	sw_ch->stream.flush = NO_FLUSH;
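	/* ISA-L compression levels above 0 require a scratch buffer; level 1
	 * needs at least ISAL_DEF_LVL1_DEFAULT bytes. */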
	sw_ch->stream.level = 1;
	sw_ch->stream.level_buf = calloc(1, ISAL_DEF_LVL1_DEFAULT);
	if (sw_ch->stream.level_buf == NULL) {
		SPDK_ERRLOG("Could not allocate isal internal buffer\n");
		return -ENOMEM;
	}
	sw_ch->stream.level_buf_size = ISAL_DEF_LVL1_DEFAULT;
	isal_inflate_init(&sw_ch->state);
#endif

	return 0;
}

static void
sw_accel_destroy_cb(void *io_device, void *ctx_buf)
{
	struct sw_accel_io_channel *sw_ch = ctx_buf;

#ifdef SPDK_CONFIG_ISAL
	free(sw_ch->stream.level_buf);
#endif

	spdk_poller_unregister(&sw_ch->completion_poller);
}

static struct spdk_io_channel *
sw_accel_get_io_channel(void)
{
	return spdk_get_io_channel(&g_sw_module);
}

static size_t
sw_accel_module_get_ctx_size(void)
{
	return sizeof(struct spdk_accel_task);
}

static int
sw_accel_module_init(void)
{
	spdk_io_device_register(&g_sw_module, sw_accel_create_cb, sw_accel_destroy_cb,
				sizeof(struct sw_accel_io_channel), "sw_accel_module");

	return 0;
}

static void
sw_accel_module_fini(void *ctxt)
{
	spdk_io_device_unregister(&g_sw_module, NULL);
	spdk_accel_module_finish();
}

static int
sw_accel_create_aes_xts(struct spdk_accel_crypto_key *key)
{
#ifdef SPDK_CONFIG_ISAL_CRYPTO
	struct sw_accel_crypto_key_data *key_data;

	key_data = calloc(1, sizeof(*key_data));
	if (!key_data) {
		return -ENOMEM;
	}

	switch (key->key_size) {
	case SPDK_ACCEL_AES_XTS_128_KEY_SIZE:
		key_data->encrypt = XTS_AES_128_enc;
		key_data->decrypt = XTS_AES_128_dec;
		break;
	case SPDK_ACCEL_AES_XTS_256_KEY_SIZE:
		key_data->encrypt = XTS_AES_256_enc;
		key_data->decrypt = XTS_AES_256_dec;
		break;
	default:
		assert(0);
		free(key_data);
		return -EINVAL;
	}

	key->priv = key_data;

	return 0;
#else
	return -ENOTSUP;
#endif
}

static int
sw_accel_crypto_key_init(struct spdk_accel_crypto_key *key)
{
	return sw_accel_create_aes_xts(key);
}

static void
sw_accel_crypto_key_deinit(struct spdk_accel_crypto_key *key)
{
	if (!key || key->module_if != &g_sw_module || !key->priv) {
		return;
	}

	free(key->priv);
}

static bool
sw_accel_crypto_supports_tweak_mode(enum spdk_accel_crypto_tweak_mode tweak_mode)
{
	return tweak_mode == SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA;
}

static bool
sw_accel_crypto_supports_cipher(enum spdk_accel_cipher cipher, size_t key_size)
{
	switch (cipher) {
	case SPDK_ACCEL_CIPHER_AES_XTS:
		return key_size == SPDK_ACCEL_AES_XTS_128_KEY_SIZE ||
		       key_size == SPDK_ACCEL_AES_XTS_256_KEY_SIZE;
	default:
		return false;
	}
}

static int
sw_accel_get_operation_info(enum spdk_accel_opcode opcode,
			    const struct spdk_accel_operation_exec_ctx *ctx,
			    struct spdk_accel_opcode_info *info)
{
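	/* The software paths impose no buffer alignment requirement. */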
	info->required_alignment = 0;

	return 0;
}

static struct spdk_accel_module_if g_sw_module = {
	.module_init			= sw_accel_module_init,
	.module_fini			= sw_accel_module_fini,
	.write_config_json		= NULL,
	.get_ctx_size			= sw_accel_module_get_ctx_size,
	.name				= "software",
	.priority			= SPDK_ACCEL_SW_PRIORITY,
	.supports_opcode		= sw_accel_supports_opcode,
	.get_io_channel			= sw_accel_get_io_channel,
	.submit_tasks			= sw_accel_submit_tasks,
	.crypto_key_init		= sw_accel_crypto_key_init,
	.crypto_key_deinit		= sw_accel_crypto_key_deinit,
	.crypto_supports_tweak_mode	= sw_accel_crypto_supports_tweak_mode,
	.crypto_supports_cipher		= sw_accel_crypto_supports_cipher,
	.get_operation_info		= sw_accel_get_operation_info,
};

SPDK_ACCEL_MODULE_REGISTER(sw, &g_sw_module)