/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2022 Intel Corporation.
 *   Copyright (c) 2022, 2023 NVIDIA CORPORATION & AFFILIATES
 *   All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/accel_module.h"
#include "accel_internal.h"

#include "spdk/env.h"
#include "spdk/likely.h"
#include "spdk/log.h"
#include "spdk/thread.h"
#include "spdk/json.h"
#include "spdk/crc32.h"
#include "spdk/util.h"
#include "spdk/xor.h"
#include "spdk/dif.h"

#ifdef SPDK_CONFIG_ISAL
#include "../isa-l/include/igzip_lib.h"
#ifdef SPDK_CONFIG_ISAL_CRYPTO
#include "../isa-l-crypto/include/aes_xts.h"
#include "../isa-l-crypto/include/isal_crypto_api.h"
#endif
#endif

/* Per the AES-XTS spec, a data unit cannot exceed 2^20 blocks of 128 bits (16 bytes) each,
 * i.e. 2^24 bytes, which is the limit expressed here. */
#define ACCEL_AES_XTS_MAX_BLOCK_SIZE (1 << 24)

struct sw_accel_io_channel {
	/* for ISAL */
#ifdef SPDK_CONFIG_ISAL
	struct isal_zstream		stream;
	struct inflate_state		state;
#endif
	struct spdk_poller		*completion_poller;
	STAILQ_HEAD(, spdk_accel_task)	tasks_to_complete;
};

typedef int (*sw_accel_crypto_op)(const uint8_t *k2, const uint8_t *k1,
				  const uint8_t *initial_tweak, const uint64_t len_bytes,
				  const void *in, void *out);

struct sw_accel_crypto_key_data {
	sw_accel_crypto_op encrypt;
	sw_accel_crypto_op decrypt;
};

static struct spdk_accel_module_if g_sw_module;

static void sw_accel_crypto_key_deinit(struct spdk_accel_crypto_key *_key);
static int sw_accel_crypto_key_init(struct spdk_accel_crypto_key *key);
static bool sw_accel_crypto_supports_tweak_mode(enum spdk_accel_crypto_tweak_mode tweak_mode);
static bool sw_accel_crypto_supports_cipher(enum spdk_accel_cipher cipher, size_t key_size);

/* Post SW completions to a list; processed by ->completion_poller. */
inline static void
_add_to_comp_list(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task, int status)
{
	accel_task->status = status;
	STAILQ_INSERT_TAIL(&sw_ch->tasks_to_complete, accel_task, link);
}

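/*
 * The software module implements every opcode it reports here; it registers
 * with SPDK_ACCEL_SW_PRIORITY so the framework can fall back to it when no
 * other module claims an opcode.
 */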
static bool
sw_accel_supports_opcode(enum spdk_accel_opcode opc)
{
	switch (opc) {
	case SPDK_ACCEL_OPC_COPY:
	case SPDK_ACCEL_OPC_FILL:
	case SPDK_ACCEL_OPC_DUALCAST:
	case SPDK_ACCEL_OPC_COMPARE:
	case SPDK_ACCEL_OPC_CRC32C:
	case SPDK_ACCEL_OPC_COPY_CRC32C:
	case SPDK_ACCEL_OPC_COMPRESS:
	case SPDK_ACCEL_OPC_DECOMPRESS:
	case SPDK_ACCEL_OPC_ENCRYPT:
	case SPDK_ACCEL_OPC_DECRYPT:
	case SPDK_ACCEL_OPC_XOR:
	case SPDK_ACCEL_OPC_DIF_VERIFY:
	case SPDK_ACCEL_OPC_DIF_GENERATE:
	case SPDK_ACCEL_OPC_DIF_GENERATE_COPY:
	case SPDK_ACCEL_OPC_DIF_VERIFY_COPY:
		return true;
	default:
		return false;
	}
}

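/*
 * Dualcast copies a single source buffer to two destinations. Only
 * single-element iovecs of matching length are supported in software.
 */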
static int
_sw_accel_dualcast_iovs(struct iovec *dst_iovs, uint32_t dst_iovcnt,
			struct iovec *dst2_iovs, uint32_t dst2_iovcnt,
			struct iovec *src_iovs, uint32_t src_iovcnt)
{
	if (spdk_unlikely(dst_iovcnt != 1 || dst2_iovcnt != 1 || src_iovcnt != 1)) {
		return -EINVAL;
	}

	if (spdk_unlikely(dst_iovs[0].iov_len != src_iovs[0].iov_len ||
			  dst_iovs[0].iov_len != dst2_iovs[0].iov_len)) {
		return -EINVAL;
	}

	memcpy(dst_iovs[0].iov_base, src_iovs[0].iov_base, dst_iovs[0].iov_len);
	memcpy(dst2_iovs[0].iov_base, src_iovs[0].iov_base, dst_iovs[0].iov_len);

	return 0;
}

static void
_sw_accel_copy_iovs(struct iovec *dst_iovs, uint32_t dst_iovcnt,
		    struct iovec *src_iovs, uint32_t src_iovcnt)
{
	struct spdk_ioviter iter;
	void *src, *dst;
	size_t len;

	for (len = spdk_ioviter_first(&iter, src_iovs, src_iovcnt,
				      dst_iovs, dst_iovcnt, &src, &dst);
	     len != 0;
	     len = spdk_ioviter_next(&iter, &src, &dst)) {
		memcpy(dst, src, len);
	}
}

static int
_sw_accel_compare(struct iovec *src_iovs, uint32_t src_iovcnt,
		  struct iovec *src2_iovs, uint32_t src2_iovcnt)
{
	if (spdk_unlikely(src_iovcnt != 1 || src2_iovcnt != 1)) {
		return -EINVAL;
	}

	if (spdk_unlikely(src_iovs[0].iov_len != src2_iovs[0].iov_len)) {
		return -EINVAL;
	}

	return memcmp(src_iovs[0].iov_base, src2_iovs[0].iov_base, src_iovs[0].iov_len);
}

static int
_sw_accel_fill(struct iovec *iovs, uint32_t iovcnt, uint8_t fill)
{
	void *dst;
	size_t nbytes;

	if (spdk_unlikely(iovcnt != 1)) {
		return -EINVAL;
	}

	dst = iovs[0].iov_base;
	nbytes = iovs[0].iov_len;

	memset(dst, fill, nbytes);

	return 0;
}

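/*
 * Note that the accel seed is bit-inverted before it is handed to
 * spdk_crc32c_iov_update(), which works on the running (inverted) CRC state.
 */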
static void
_sw_accel_crc32cv(uint32_t *crc_dst, struct iovec *iov, uint32_t iovcnt, uint32_t seed)
{
	*crc_dst = spdk_crc32c_iov_update(iov, iovcnt, ~seed);
}

static int
_sw_accel_compress(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
#ifdef SPDK_CONFIG_ISAL
	size_t last_seglen = accel_task->s.iovs[accel_task->s.iovcnt - 1].iov_len;
	struct iovec *siov = accel_task->s.iovs;
	struct iovec *diov = accel_task->d.iovs;
	size_t remaining;
	uint32_t i, s = 0, d = 0;
	uint32_t prev_avail_in;
	int rc = 0;

	remaining = 0;
	for (i = 0; i < accel_task->s.iovcnt; ++i) {
		remaining += accel_task->s.iovs[i].iov_len;
	}

	isal_deflate_reset(&sw_ch->stream);
	sw_ch->stream.end_of_stream = 0;
	sw_ch->stream.next_out = diov[d].iov_base;
	sw_ch->stream.avail_out = diov[d].iov_len;
	sw_ch->stream.next_in = siov[s].iov_base;
	sw_ch->stream.avail_in = siov[s].iov_len;

	do {
		/* if isal has exhausted the current dst iovec, move to the next
		 * one if there is one */
		if (sw_ch->stream.avail_out == 0) {
			if (++d < accel_task->d.iovcnt) {
				sw_ch->stream.next_out = diov[d].iov_base;
				sw_ch->stream.avail_out = diov[d].iov_len;
				assert(sw_ch->stream.avail_out > 0);
			} else {
				/* we have no avail_out but also no more iovecs left so this is
				 * the case where either the output buffer was a perfect fit
				 * or not enough was provided.  Check the ISAL state to determine
				 * which. */
				if (sw_ch->stream.internal_state.state != ZSTATE_END) {
					SPDK_ERRLOG("Not enough destination buffer provided.\n");
					rc = -ENOMEM;
				}
				break;
			}
		}

		/* if isal has exhausted the current src iovec, move to the next
		 * one if there is one */
		if (sw_ch->stream.avail_in == 0 && ((s + 1) < accel_task->s.iovcnt)) {
			s++;
			sw_ch->stream.next_in = siov[s].iov_base;
			sw_ch->stream.avail_in = siov[s].iov_len;
			assert(sw_ch->stream.avail_in > 0);
		}

		if (remaining <= last_seglen) {
			/* Need to set end of stream on last block */
			sw_ch->stream.end_of_stream = 1;
		}

		prev_avail_in = sw_ch->stream.avail_in;
		rc = isal_deflate(&sw_ch->stream);
		if (rc) {
			SPDK_ERRLOG("isal_deflate returned error %d.\n", rc);
			/* Bail out rather than spin if ISA-L cannot make progress. */
			break;
		}

		/* Subtract only what this isal_deflate() call consumed; using
		 * (siov[s].iov_len - avail_in) would double-count an iovec that
		 * spans multiple calls. */
		remaining -= (prev_avail_in - sw_ch->stream.avail_in);

	} while (remaining > 0 || sw_ch->stream.avail_out == 0);
	assert(rc != 0 || sw_ch->stream.avail_in == 0);

	/* Get our total output size */
	if (accel_task->output_size != NULL) {
		assert(rc != 0 || sw_ch->stream.total_out > 0);
		*accel_task->output_size = sw_ch->stream.total_out;
	}

	return rc;
#else
	SPDK_ERRLOG("ISAL option is required to use software compression.\n");
	return -EINVAL;
#endif
}

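/*
 * Decompression mirrors the loop above: src/dst iovecs are advanced as ISA-L
 * drains them, and the loop ends once the inflate state machine reports
 * ISAL_BLOCK_FINISH.
 */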
static int
_sw_accel_decompress(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
#ifdef SPDK_CONFIG_ISAL
	struct iovec *siov = accel_task->s.iovs;
	struct iovec *diov = accel_task->d.iovs;
	uint32_t s = 0, d = 0;
	int rc = 0;

	isal_inflate_reset(&sw_ch->state);
	sw_ch->state.next_out = diov[d].iov_base;
	sw_ch->state.avail_out = diov[d].iov_len;
	sw_ch->state.next_in = siov[s].iov_base;
	sw_ch->state.avail_in = siov[s].iov_len;

	do {
		/* if isal has exhausted the current dst iovec, move to the next
		 * one if there is one */
		if (sw_ch->state.avail_out == 0 && ((d + 1) < accel_task->d.iovcnt)) {
			d++;
			sw_ch->state.next_out = diov[d].iov_base;
			sw_ch->state.avail_out = diov[d].iov_len;
			assert(sw_ch->state.avail_out > 0);
		}

		/* if isal has exhausted the current src iovec, move to the next
		 * one if there is one */
		if (sw_ch->state.avail_in == 0 && ((s + 1) < accel_task->s.iovcnt)) {
			s++;
			sw_ch->state.next_in = siov[s].iov_base;
			sw_ch->state.avail_in = siov[s].iov_len;
			assert(sw_ch->state.avail_in > 0);
		}

		rc = isal_inflate(&sw_ch->state);
		if (rc) {
			SPDK_ERRLOG("isal_inflate returned error %d.\n", rc);
			/* Bail out rather than spin if ISA-L cannot make progress. */
			break;
		}

	} while (sw_ch->state.block_state < ISAL_BLOCK_FINISH);
	assert(rc != 0 || sw_ch->state.avail_in == 0);

	/* Get our total output size */
	if (accel_task->output_size != NULL) {
		assert(rc != 0 || sw_ch->state.total_out > 0);
		*accel_task->output_size = sw_ch->state.total_out;
	}

	return rc;
#else
	SPDK_ERRLOG("ISAL option is required to use software decompression.\n");
	return -EINVAL;
#endif
}

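/*
 * Core AES-XTS walk shared by encrypt and decrypt. Data is processed in
 * chunks capped by both the logical block size and the current src/dst
 * iovec, so a logical block may straddle iovec boundaries; the tweak is
 * bumped each time a full block has been handled.
 */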
static int
_sw_accel_crypto_operation(struct spdk_accel_task *accel_task, struct spdk_accel_crypto_key *key,
			   sw_accel_crypto_op op)
{
#ifdef SPDK_CONFIG_ISAL_CRYPTO
	uint64_t iv[2];
	size_t remaining_len, dst_len;
	uint64_t src_offset = 0, dst_offset = 0;
	uint32_t src_iovpos = 0, dst_iovpos = 0, src_iovcnt, dst_iovcnt;
	uint32_t i, block_size, crypto_len, crypto_accum_len = 0;
	struct iovec *src_iov, *dst_iov;
	uint8_t *src, *dst;
	int rc;

	/* The iv is 128 bits; since the logical block address (64 bits) is used as
	 * the iv, the first 8 bytes are filled with zeroes. */
	iv[0] = 0;
	iv[1] = accel_task->iv;
	src_iov = accel_task->s.iovs;
	src_iovcnt = accel_task->s.iovcnt;
	if (accel_task->d.iovcnt) {
		dst_iov = accel_task->d.iovs;
		dst_iovcnt = accel_task->d.iovcnt;
	} else {
		/* in-place operation */
		dst_iov = accel_task->s.iovs;
		dst_iovcnt = accel_task->s.iovcnt;
	}
	block_size = accel_task->block_size;

	if (!src_iovcnt || !dst_iovcnt || !block_size || !op) {
		SPDK_ERRLOG("src_iovcnt %u, dst_iovcnt %u, block_size %u, op %p\n", src_iovcnt, dst_iovcnt,
			    block_size, op);
		return -EINVAL;
	}

	remaining_len = 0;
	for (i = 0; i < src_iovcnt; i++) {
		remaining_len += src_iov[i].iov_len;
	}
	dst_len = 0;
	for (i = 0; i < dst_iovcnt; i++) {
		dst_len += dst_iov[i].iov_len;
	}

	if (spdk_unlikely(remaining_len != dst_len || !remaining_len)) {
		return -ERANGE;
	}
	if (spdk_unlikely(remaining_len % accel_task->block_size != 0)) {
		return -EINVAL;
	}

	while (remaining_len) {
		crypto_len = spdk_min(block_size - crypto_accum_len, src_iov->iov_len - src_offset);
		crypto_len = spdk_min(crypto_len, dst_iov->iov_len - dst_offset);
		src = (uint8_t *)src_iov->iov_base + src_offset;
		dst = (uint8_t *)dst_iov->iov_base + dst_offset;

		rc = op((uint8_t *)key->key2, (uint8_t *)key->key, (uint8_t *)iv, crypto_len, src, dst);
		if (rc != ISAL_CRYPTO_ERR_NONE) {
			break;
		}

		src_offset += crypto_len;
		dst_offset += crypto_len;
		crypto_accum_len += crypto_len;
		remaining_len -= crypto_len;

		if (crypto_accum_len == block_size) {
			/* We may only process part of a logical block at a time; once the
			 * whole block has been processed, increment the iv. */
			crypto_accum_len = 0;
			iv[1]++;
		}
		if (src_offset == src_iov->iov_len) {
			src_iov++;
			src_iovpos++;
			src_offset = 0;
		}
		if (src_iovpos == src_iovcnt) {
			break;
		}
		if (dst_offset == dst_iov->iov_len) {
			dst_iov++;
			dst_iovpos++;
			dst_offset = 0;
		}
		if (dst_iovpos == dst_iovcnt) {
			break;
		}
	}

	if (remaining_len) {
		SPDK_ERRLOG("remaining len %zu\n", remaining_len);
		return -EINVAL;
	}

	return 0;
#else
	return -ENOTSUP;
#endif
}

static int
_sw_accel_encrypt(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	struct spdk_accel_crypto_key *key;
	struct sw_accel_crypto_key_data *key_data;

	key = accel_task->crypto_key;
	if (spdk_unlikely(key->module_if != &g_sw_module || !key->priv)) {
		return -EINVAL;
	}
	if (spdk_unlikely(accel_task->block_size > ACCEL_AES_XTS_MAX_BLOCK_SIZE)) {
		SPDK_WARNLOG("Max block size for AES_XTS is limited to %u, current size %u\n",
			     ACCEL_AES_XTS_MAX_BLOCK_SIZE, accel_task->block_size);
		return -ERANGE;
	}
	key_data = key->priv;
	return _sw_accel_crypto_operation(accel_task, key, key_data->encrypt);
}

static int
_sw_accel_decrypt(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	struct spdk_accel_crypto_key *key;
	struct sw_accel_crypto_key_data *key_data;

	key = accel_task->crypto_key;
	if (spdk_unlikely(key->module_if != &g_sw_module || !key->priv)) {
		return -EINVAL;
	}
	if (spdk_unlikely(accel_task->block_size > ACCEL_AES_XTS_MAX_BLOCK_SIZE)) {
		SPDK_WARNLOG("Max block size for AES_XTS is limited to %u, current size %u\n",
			     ACCEL_AES_XTS_MAX_BLOCK_SIZE, accel_task->block_size);
		return -ERANGE;
	}
	key_data = key->priv;
	return _sw_accel_crypto_operation(accel_task, key, key_data->decrypt);
}

static int
_sw_accel_xor(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	return spdk_xor_gen(accel_task->d.iovs[0].iov_base,
			    accel_task->nsrcs.srcs,
			    accel_task->nsrcs.cnt,
			    accel_task->d.iovs[0].iov_len);
}

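/* The DIF operations below are thin wrappers around the spdk/dif library. */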
static int
_sw_accel_dif_verify(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	return spdk_dif_verify(accel_task->s.iovs,
			       accel_task->s.iovcnt,
			       accel_task->dif.num_blocks,
			       accel_task->dif.ctx,
			       accel_task->dif.err);
}

static int
_sw_accel_dif_verify_copy(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	return spdk_dif_verify_copy(accel_task->d.iovs,
				    accel_task->d.iovcnt,
				    accel_task->s.iovs,
				    accel_task->s.iovcnt,
				    accel_task->dif.num_blocks,
				    accel_task->dif.ctx,
				    accel_task->dif.err);
}

static int
_sw_accel_dif_generate(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	return spdk_dif_generate(accel_task->s.iovs,
				 accel_task->s.iovcnt,
				 accel_task->dif.num_blocks,
				 accel_task->dif.ctx);
}

static int
_sw_accel_dif_generate_copy(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	return spdk_dif_generate_copy(accel_task->s.iovs,
				      accel_task->s.iovcnt,
				      accel_task->d.iovs,
				      accel_task->d.iovcnt,
				      accel_task->dif.num_blocks,
				      accel_task->dif.ctx);
}

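/*
 * Completion poller: drain a private copy of the list so that tasks submitted
 * from within completion callbacks land on the channel list and are picked up
 * on the next poll instead of being handled recursively here.
 */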
static int
accel_comp_poll(void *arg)
{
	struct sw_accel_io_channel	*sw_ch = arg;
	STAILQ_HEAD(, spdk_accel_task)	tasks_to_complete;
	struct spdk_accel_task		*accel_task;

	if (STAILQ_EMPTY(&sw_ch->tasks_to_complete)) {
		return SPDK_POLLER_IDLE;
	}

	STAILQ_INIT(&tasks_to_complete);
	STAILQ_SWAP(&tasks_to_complete, &sw_ch->tasks_to_complete, spdk_accel_task);

	while ((accel_task = STAILQ_FIRST(&tasks_to_complete))) {
		STAILQ_REMOVE_HEAD(&tasks_to_complete, link);
		spdk_accel_task_complete(accel_task, accel_task->status);
	}

	return SPDK_POLLER_BUSY;
}

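/*
 * Execute each task in the submitted chain synchronously, then queue it on
 * the completion list; spdk_accel_task_complete() is deferred to the poller
 * above.
 */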
static int
sw_accel_submit_tasks(struct spdk_io_channel *ch, struct spdk_accel_task *accel_task)
{
	struct sw_accel_io_channel *sw_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *tmp;
	int rc = 0;

	/*
	 * Lazily initialize our completion poller. We don't want to complete
	 * tasks inline because their completion callbacks will likely submit
	 * more tasks.
	 */
	if (spdk_unlikely(sw_ch->completion_poller == NULL)) {
		sw_ch->completion_poller = SPDK_POLLER_REGISTER(accel_comp_poll, sw_ch, 0);
	}

	do {
		switch (accel_task->op_code) {
		case SPDK_ACCEL_OPC_COPY:
			_sw_accel_copy_iovs(accel_task->d.iovs, accel_task->d.iovcnt,
					    accel_task->s.iovs, accel_task->s.iovcnt);
			break;
		case SPDK_ACCEL_OPC_FILL:
			rc = _sw_accel_fill(accel_task->d.iovs, accel_task->d.iovcnt,
					    accel_task->fill_pattern);
			break;
		case SPDK_ACCEL_OPC_DUALCAST:
			rc = _sw_accel_dualcast_iovs(accel_task->d.iovs, accel_task->d.iovcnt,
						     accel_task->d2.iovs, accel_task->d2.iovcnt,
						     accel_task->s.iovs, accel_task->s.iovcnt);
			break;
		case SPDK_ACCEL_OPC_COMPARE:
			rc = _sw_accel_compare(accel_task->s.iovs, accel_task->s.iovcnt,
					       accel_task->s2.iovs, accel_task->s2.iovcnt);
			break;
		case SPDK_ACCEL_OPC_CRC32C:
			_sw_accel_crc32cv(accel_task->crc_dst, accel_task->s.iovs, accel_task->s.iovcnt, accel_task->seed);
			break;
		case SPDK_ACCEL_OPC_COPY_CRC32C:
			_sw_accel_copy_iovs(accel_task->d.iovs, accel_task->d.iovcnt,
					    accel_task->s.iovs, accel_task->s.iovcnt);
			_sw_accel_crc32cv(accel_task->crc_dst, accel_task->s.iovs,
					  accel_task->s.iovcnt, accel_task->seed);
			break;
		case SPDK_ACCEL_OPC_COMPRESS:
			rc = _sw_accel_compress(sw_ch, accel_task);
			break;
		case SPDK_ACCEL_OPC_DECOMPRESS:
			rc = _sw_accel_decompress(sw_ch, accel_task);
			break;
		case SPDK_ACCEL_OPC_XOR:
			rc = _sw_accel_xor(sw_ch, accel_task);
			break;
		case SPDK_ACCEL_OPC_ENCRYPT:
			rc = _sw_accel_encrypt(sw_ch, accel_task);
			break;
		case SPDK_ACCEL_OPC_DECRYPT:
			rc = _sw_accel_decrypt(sw_ch, accel_task);
			break;
		case SPDK_ACCEL_OPC_DIF_VERIFY:
			rc = _sw_accel_dif_verify(sw_ch, accel_task);
			break;
		case SPDK_ACCEL_OPC_DIF_VERIFY_COPY:
			rc = _sw_accel_dif_verify_copy(sw_ch, accel_task);
			break;
		case SPDK_ACCEL_OPC_DIF_GENERATE:
			rc = _sw_accel_dif_generate(sw_ch, accel_task);
			break;
		case SPDK_ACCEL_OPC_DIF_GENERATE_COPY:
			rc = _sw_accel_dif_generate_copy(sw_ch, accel_task);
			break;
		default:
			assert(false);
			break;
		}

		tmp = STAILQ_NEXT(accel_task, link);

		_add_to_comp_list(sw_ch, accel_task, rc);

		accel_task = tmp;
	} while (accel_task);

	return 0;
}

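/*
 * Per-channel create/destroy callbacks. Channel creation sets up the
 * deferred-completion list and, when ISA-L is available, the deflate stream
 * (level 1, with its required level buffer) and the inflate state.
 */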
static int
sw_accel_create_cb(void *io_device, void *ctx_buf)
{
	struct sw_accel_io_channel *sw_ch = ctx_buf;

	STAILQ_INIT(&sw_ch->tasks_to_complete);
	sw_ch->completion_poller = NULL;

#ifdef SPDK_CONFIG_ISAL
	isal_deflate_init(&sw_ch->stream);
	sw_ch->stream.flush = NO_FLUSH;
	sw_ch->stream.level = 1;
	sw_ch->stream.level_buf = calloc(1, ISAL_DEF_LVL1_DEFAULT);
	if (sw_ch->stream.level_buf == NULL) {
		SPDK_ERRLOG("Could not allocate isal internal buffer\n");
		return -ENOMEM;
	}
	sw_ch->stream.level_buf_size = ISAL_DEF_LVL1_DEFAULT;
	isal_inflate_init(&sw_ch->state);
#endif

	return 0;
}

static void
sw_accel_destroy_cb(void *io_device, void *ctx_buf)
{
	struct sw_accel_io_channel *sw_ch = ctx_buf;

#ifdef SPDK_CONFIG_ISAL
	free(sw_ch->stream.level_buf);
#endif

	spdk_poller_unregister(&sw_ch->completion_poller);
}

static struct spdk_io_channel *
sw_accel_get_io_channel(void)
{
	return spdk_get_io_channel(&g_sw_module);
}

static size_t
sw_accel_module_get_ctx_size(void)
{
	return sizeof(struct spdk_accel_task);
}

static int
sw_accel_module_init(void)
{
	spdk_io_device_register(&g_sw_module, sw_accel_create_cb, sw_accel_destroy_cb,
				sizeof(struct sw_accel_io_channel), "sw_accel_module");

	return 0;
}

static void
sw_accel_module_fini(void *ctxt)
{
	spdk_io_device_unregister(&g_sw_module, NULL);
	spdk_accel_module_finish();
}

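/*
 * Select the ISA-L AES-XTS entry points that match the key size; the chosen
 * encrypt/decrypt pair is stashed in key->priv for the lifetime of the key.
 */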
static int
sw_accel_create_aes_xts(struct spdk_accel_crypto_key *key)
{
#ifdef SPDK_CONFIG_ISAL_CRYPTO
	struct sw_accel_crypto_key_data *key_data;

	key_data = calloc(1, sizeof(*key_data));
	if (!key_data) {
		return -ENOMEM;
	}

	switch (key->key_size) {
	case SPDK_ACCEL_AES_XTS_128_KEY_SIZE:
		key_data->encrypt = isal_aes_xts_enc_128;
		key_data->decrypt = isal_aes_xts_dec_128;
		break;
	case SPDK_ACCEL_AES_XTS_256_KEY_SIZE:
		key_data->encrypt = isal_aes_xts_enc_256;
		key_data->decrypt = isal_aes_xts_dec_256;
		break;
	default:
		assert(0);
		free(key_data);
		return -EINVAL;
	}

	key->priv = key_data;

	return 0;
#else
	return -ENOTSUP;
#endif
}

static int
sw_accel_crypto_key_init(struct spdk_accel_crypto_key *key)
{
	return sw_accel_create_aes_xts(key);
}

static void
sw_accel_crypto_key_deinit(struct spdk_accel_crypto_key *key)
{
	if (!key || key->module_if != &g_sw_module || !key->priv) {
		return;
	}

	free(key->priv);
}

static bool
sw_accel_crypto_supports_tweak_mode(enum spdk_accel_crypto_tweak_mode tweak_mode)
{
	return tweak_mode == SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA;
}

static bool
sw_accel_crypto_supports_cipher(enum spdk_accel_cipher cipher, size_t key_size)
{
	switch (cipher) {
	case SPDK_ACCEL_CIPHER_AES_XTS:
		return key_size == SPDK_ACCEL_AES_XTS_128_KEY_SIZE || key_size == SPDK_ACCEL_AES_XTS_256_KEY_SIZE;
	default:
		return false;
	}
}

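/* Software paths have no alignment requirements. */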
static int
sw_accel_get_operation_info(enum spdk_accel_opcode opcode,
			    const struct spdk_accel_operation_exec_ctx *ctx,
			    struct spdk_accel_opcode_info *info)
{
	info->required_alignment = 0;

	return 0;
}

static struct spdk_accel_module_if g_sw_module = {
	.module_init			= sw_accel_module_init,
	.module_fini			= sw_accel_module_fini,
	.write_config_json		= NULL,
	.get_ctx_size			= sw_accel_module_get_ctx_size,
	.name				= "software",
	.priority			= SPDK_ACCEL_SW_PRIORITY,
	.supports_opcode		= sw_accel_supports_opcode,
	.get_io_channel			= sw_accel_get_io_channel,
	.submit_tasks			= sw_accel_submit_tasks,
	.crypto_key_init		= sw_accel_crypto_key_init,
	.crypto_key_deinit		= sw_accel_crypto_key_deinit,
	.crypto_supports_tweak_mode	= sw_accel_crypto_supports_tweak_mode,
	.crypto_supports_cipher		= sw_accel_crypto_supports_cipher,
	.get_operation_info		= sw_accel_get_operation_info,
};

SPDK_ACCEL_MODULE_REGISTER(sw, &g_sw_module)