/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2022 Intel Corporation.
 *   Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES
 *   All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk_internal/accel_module.h"
#include "accel_internal.h"

#include "spdk/env.h"
#include "spdk/likely.h"
#include "spdk/log.h"
#include "spdk/thread.h"
#include "spdk/json.h"
#include "spdk/crc32.h"
#include "spdk/util.h"

#ifdef SPDK_CONFIG_PMDK
#include "libpmem.h"
#endif

#ifdef SPDK_CONFIG_ISAL
#include "../isa-l/include/igzip_lib.h"
#ifdef SPDK_CONFIG_ISAL_CRYPTO
#include "../isa-l-crypto/include/aes_xts.h"
#endif
#endif

#define ACCEL_AES_XTS_128_KEY_SIZE 16
#define ACCEL_AES_XTS_256_KEY_SIZE 32
#define ACCEL_AES_XTS "AES_XTS"
/* Per the AES-XTS spec, a data unit cannot exceed 2^20 blocks of 128 bits (16 bytes)
 * each, i.e. 2^24 bytes. */
#define ACCEL_AES_XTS_MAX_BLOCK_SIZE (1 << 24)

struct sw_accel_io_channel {
	/* for ISAL */
#ifdef SPDK_CONFIG_ISAL
	struct isal_zstream		stream;
	struct inflate_state		state;
#endif
	struct spdk_poller		*completion_poller;
	TAILQ_HEAD(, spdk_accel_task)	tasks_to_complete;
};

typedef void (*sw_accel_crypto_op)(uint8_t *k2, uint8_t *k1, uint8_t *tweak, uint64_t lba_size,
				   const uint8_t *src, uint8_t *dst);

struct sw_accel_crypto_key_data {
	sw_accel_crypto_op encrypt;
	sw_accel_crypto_op decrypt;
};

static struct spdk_accel_module_if g_sw_module;

static void sw_accel_crypto_key_deinit(struct spdk_accel_crypto_key *_key);
static int sw_accel_crypto_key_init(struct spdk_accel_crypto_key *key);

/* Post SW completions to a list and complete them in a poller; we don't want to
 * complete them on the caller's stack, since the callback will likely submit more work. */
inline static void
_add_to_comp_list(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task, int status)
{
	accel_task->status = status;
	TAILQ_INSERT_TAIL(&sw_ch->tasks_to_complete, accel_task, link);
}

/* Used when the SW engine is selected and the durable flag is set. */
inline static int
_check_flags(int flags)
{
	if (flags & ACCEL_FLAG_PERSISTENT) {
#ifndef SPDK_CONFIG_PMDK
		/* PMDK is required to use this flag. */
		SPDK_ERRLOG("ACCEL_FLAG_PERSISTENT set but PMDK not configured. Configure PMDK or do not use this flag.\n");
		return -EINVAL;
#endif
	}
	return 0;
}

static bool
sw_accel_supports_opcode(enum accel_opcode opc)
{
	switch (opc) {
	case ACCEL_OPC_COPY:
	case ACCEL_OPC_FILL:
	case ACCEL_OPC_DUALCAST:
	case ACCEL_OPC_COMPARE:
	case ACCEL_OPC_CRC32C:
	case ACCEL_OPC_COPY_CRC32C:
	case ACCEL_OPC_COMPRESS:
	case ACCEL_OPC_DECOMPRESS:
	case ACCEL_OPC_ENCRYPT:
	case ACCEL_OPC_DECRYPT:
		return true;
	default:
		return false;
	}
}

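/* Copy to a destination that may live on persistent memory: persist via PMDK when
 * dst is pmem, otherwise fall back to memcpy() followed by an msync. */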
static inline void
_pmem_memcpy(void *dst, const void *src, size_t len)
{
#ifdef SPDK_CONFIG_PMDK
	int is_pmem = pmem_is_pmem(dst, len);

	if (is_pmem) {
		pmem_memcpy_persist(dst, src, len);
	} else {
		memcpy(dst, src, len);
		pmem_msync(dst, len);
	}
#else
	SPDK_ERRLOG("Function not defined without SPDK_CONFIG_PMDK enabled.\n");
	assert(0);
#endif
}

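/* Dualcast: write the same source buffer to two separate destinations. */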
static void
_sw_accel_dualcast(void *dst1, void *dst2, void *src, size_t nbytes, int flags)
{
	if (flags & ACCEL_FLAG_PERSISTENT) {
		_pmem_memcpy(dst1, src, nbytes);
		_pmem_memcpy(dst2, src, nbytes);
	} else {
		memcpy(dst1, src, nbytes);
		memcpy(dst2, src, nbytes);
	}
}

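/* The software path only supports single-element iovecs for dualcast, and all
 * three buffers must be the same length. */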
static int
_sw_accel_dualcast_iovs(struct iovec *dst_iovs, uint32_t dst_iovcnt,
			struct iovec *dst2_iovs, uint32_t dst2_iovcnt,
			struct iovec *src_iovs, uint32_t src_iovcnt, int flags)
{
	if (spdk_unlikely(dst_iovcnt != 1 || dst2_iovcnt != 1 || src_iovcnt != 1)) {
		return -EINVAL;
	}

	if (spdk_unlikely(dst_iovs[0].iov_len != src_iovs[0].iov_len ||
			  dst_iovs[0].iov_len != dst2_iovs[0].iov_len)) {
		return -EINVAL;
	}

	_sw_accel_dualcast(dst_iovs[0].iov_base, dst2_iovs[0].iov_base, src_iovs[0].iov_base,
			   dst_iovs[0].iov_len, flags);

	return 0;
}

static void
_sw_accel_copy(void *dst, void *src, size_t nbytes, int flags)
{
	if (flags & ACCEL_FLAG_PERSISTENT) {
		_pmem_memcpy(dst, src, nbytes);
	} else {
		memcpy(dst, src, nbytes);
	}
}

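/* Copy between two iovec arrays of arbitrary shape; spdk_ioviter hands back the
 * largest chunk shared by the current source and destination segments. */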
static void
_sw_accel_copy_iovs(struct iovec *dst_iovs, uint32_t dst_iovcnt,
		    struct iovec *src_iovs, uint32_t src_iovcnt, int flags)
{
	struct spdk_ioviter iter;
	void *src, *dst;
	size_t len;

	for (len = spdk_ioviter_first(&iter, src_iovs, src_iovcnt,
				      dst_iovs, dst_iovcnt, &src, &dst);
	     len != 0;
	     len = spdk_ioviter_next(&iter, &src, &dst)) {
		_sw_accel_copy(dst, src, len, flags);
	}
}

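/* Compare two single-element iovecs of equal length; like memcmp(), returns 0 on
 * a match and nonzero otherwise. */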
static int
_sw_accel_compare(struct iovec *src_iovs, uint32_t src_iovcnt,
		  struct iovec *src2_iovs, uint32_t src2_iovcnt)
{
	if (spdk_unlikely(src_iovcnt != 1 || src2_iovcnt != 1)) {
		return -EINVAL;
	}

	if (spdk_unlikely(src_iovs[0].iov_len != src2_iovs[0].iov_len)) {
		return -EINVAL;
	}

	return memcmp(src_iovs[0].iov_base, src2_iovs[0].iov_base, src_iovs[0].iov_len);
}

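/* Fill a single buffer with a repeated byte, persisting the result when
 * ACCEL_FLAG_PERSISTENT is set and PMDK support is compiled in. */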
static int
_sw_accel_fill(struct iovec *iovs, uint32_t iovcnt, uint8_t fill, int flags)
{
	void *dst;
	size_t nbytes;

	if (spdk_unlikely(iovcnt != 1)) {
		return -EINVAL;
	}

	dst = iovs[0].iov_base;
	nbytes = iovs[0].iov_len;

	if (flags & ACCEL_FLAG_PERSISTENT) {
#ifdef SPDK_CONFIG_PMDK
		int is_pmem = pmem_is_pmem(dst, nbytes);

		if (is_pmem) {
			pmem_memset_persist(dst, fill, nbytes);
		} else {
			memset(dst, fill, nbytes);
			pmem_msync(dst, nbytes);
		}
#else
		SPDK_ERRLOG("Function not defined without SPDK_CONFIG_PMDK enabled.\n");
		assert(0);
#endif
	} else {
		memset(dst, fill, nbytes);
	}

	return 0;
}

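/* Compute CRC32-C over an iovec array; the caller's seed is bit-inverted before
 * being passed as the running CRC expected by spdk_crc32c_iov_update(). */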
static void
_sw_accel_crc32cv(uint32_t *crc_dst, struct iovec *iov, uint32_t iovcnt, uint32_t seed)
{
	*crc_dst = spdk_crc32c_iov_update(iov, iovcnt, ~seed);
}

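/* Compress the source iovecs into the destination iovecs with isal_deflate(),
 * advancing through both arrays segment by segment. end_of_stream is raised once
 * the remaining input fits in the final source segment; -ENOMEM is returned if
 * the destination is exhausted before the stream completes. */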
static int
_sw_accel_compress(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
#ifdef SPDK_CONFIG_ISAL
	size_t last_seglen = accel_task->s.iovs[accel_task->s.iovcnt - 1].iov_len;
	struct iovec *siov = accel_task->s.iovs;
	struct iovec *diov = accel_task->d.iovs;
	size_t remaining;
	uint32_t i, s = 0, d = 0;
	int rc = 0;

	remaining = 0;
	for (i = 0; i < accel_task->s.iovcnt; ++i) {
		remaining += accel_task->s.iovs[i].iov_len;
	}

	isal_deflate_reset(&sw_ch->stream);
	sw_ch->stream.end_of_stream = 0;
	sw_ch->stream.next_out = diov[d].iov_base;
	sw_ch->stream.avail_out = diov[d].iov_len;
	sw_ch->stream.next_in = siov[s].iov_base;
	sw_ch->stream.avail_in = siov[s].iov_len;

	do {
		/* if isal has exhausted the current dst iovec, move to the next
		 * one if there is one */
		if (sw_ch->stream.avail_out == 0) {
			if (++d < accel_task->d.iovcnt) {
				sw_ch->stream.next_out = diov[d].iov_base;
				sw_ch->stream.avail_out = diov[d].iov_len;
				assert(sw_ch->stream.avail_out > 0);
			} else {
				/* we have no avail_out but also no more iovecs left, so this is
				 * the case where either the output buffer was a perfect fit
				 * or not enough was provided. Check the ISAL state to determine
				 * which. */
				if (sw_ch->stream.internal_state.state != ZSTATE_END) {
					SPDK_ERRLOG("Not enough destination buffer provided.\n");
					rc = -ENOMEM;
				}
				break;
			}
		}

		/* if isal has exhausted the current src iovec, move to the next
		 * one if there is one */
		if (sw_ch->stream.avail_in == 0 && ((s + 1) < accel_task->s.iovcnt)) {
			s++;
			sw_ch->stream.next_in = siov[s].iov_base;
			sw_ch->stream.avail_in = siov[s].iov_len;
			assert(sw_ch->stream.avail_in > 0);
		}

		if (remaining <= last_seglen) {
			/* Need to set end of stream on last block */
			sw_ch->stream.end_of_stream = 1;
		}

		rc = isal_deflate(&sw_ch->stream);
		if (rc) {
			SPDK_ERRLOG("isal_deflate returned error %d.\n", rc);
		}

		if (remaining > 0) {
			assert(siov[s].iov_len > sw_ch->stream.avail_in);
			remaining -= (siov[s].iov_len - sw_ch->stream.avail_in);
		}

	} while (remaining > 0 || sw_ch->stream.avail_out == 0);
	assert(sw_ch->stream.avail_in == 0);

	/* Get our total output size */
	if (accel_task->output_size != NULL) {
		assert(sw_ch->stream.total_out > 0);
		*accel_task->output_size = sw_ch->stream.total_out;
	}

	return rc;
#else
	SPDK_ERRLOG("ISAL option is required to use software compression.\n");
	return -EINVAL;
#endif
}

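/* Decompress the source iovecs into the destination iovecs with isal_inflate(),
 * walking both arrays until ISA-L reports that the final block has finished. */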
static int
_sw_accel_decompress(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
#ifdef SPDK_CONFIG_ISAL
	struct iovec *siov = accel_task->s.iovs;
	struct iovec *diov = accel_task->d.iovs;
	uint32_t s = 0, d = 0;
	int rc = 0;

	isal_inflate_reset(&sw_ch->state);
	sw_ch->state.next_out = diov[d].iov_base;
	sw_ch->state.avail_out = diov[d].iov_len;
	sw_ch->state.next_in = siov[s].iov_base;
	sw_ch->state.avail_in = siov[s].iov_len;

	do {
		/* if isal has exhausted the current dst iovec, move to the next
		 * one if there is one */
		if (sw_ch->state.avail_out == 0 && ((d + 1) < accel_task->d.iovcnt)) {
			d++;
			sw_ch->state.next_out = diov[d].iov_base;
			sw_ch->state.avail_out = diov[d].iov_len;
			assert(sw_ch->state.avail_out > 0);
		}

		/* if isal has exhausted the current src iovec, move to the next
		 * one if there is one */
		if (sw_ch->state.avail_in == 0 && ((s + 1) < accel_task->s.iovcnt)) {
			s++;
			sw_ch->state.next_in = siov[s].iov_base;
			sw_ch->state.avail_in = siov[s].iov_len;
			assert(sw_ch->state.avail_in > 0);
		}

		rc = isal_inflate(&sw_ch->state);
		if (rc) {
			SPDK_ERRLOG("isal_inflate returned error %d.\n", rc);
		}

	} while (sw_ch->state.block_state < ISAL_BLOCK_FINISH);
	assert(sw_ch->state.avail_in == 0);

	return rc;
#else
	SPDK_ERRLOG("ISAL option is required to use software decompression.\n");
	return -EINVAL;
#endif
}

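/* Perform AES-XTS encryption or decryption across the task's iovecs. Data is
 * processed in chunks bounded by the logical block size and the current segment
 * lengths; the 128-bit tweak is advanced each time a full logical block has been
 * handled, and the operation runs in place when no destination iovecs are set. */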
static int
_sw_accel_crypto_operation(struct spdk_accel_task *accel_task, struct spdk_accel_crypto_key *key,
			   sw_accel_crypto_op op)
{
#ifdef SPDK_CONFIG_ISAL_CRYPTO
	uint64_t iv[2];
	size_t remaining_len, dst_len;
	uint64_t src_offset = 0, dst_offset = 0;
	uint32_t src_iovpos = 0, dst_iovpos = 0, src_iovcnt, dst_iovcnt;
	uint32_t i, block_size, crypto_len, crypto_accum_len = 0;
	struct iovec *src_iov, *dst_iov;
	uint8_t *src, *dst;

	/* The IV is 128 bits; since the 64-bit logical block address is used as the IV,
	 * the first 8 bytes are filled with zeroes. */
	iv[0] = 0;
	iv[1] = accel_task->iv;
	src_iov = accel_task->s.iovs;
	src_iovcnt = accel_task->s.iovcnt;
	if (accel_task->d.iovcnt) {
		dst_iov = accel_task->d.iovs;
		dst_iovcnt = accel_task->d.iovcnt;
	} else {
		/* in-place operation */
		dst_iov = accel_task->s.iovs;
		dst_iovcnt = accel_task->s.iovcnt;
	}
	block_size = accel_task->block_size;

	if (!src_iovcnt || !dst_iovcnt || !block_size || !op) {
		SPDK_ERRLOG("src_iovcnt %d, dst_iovcnt %d, block_size %d, op %p\n", src_iovcnt, dst_iovcnt,
			    block_size, op);
		return -EINVAL;
	}

	remaining_len = 0;
	for (i = 0; i < src_iovcnt; i++) {
		remaining_len += src_iov[i].iov_len;
	}
	dst_len = 0;
	for (i = 0; i < dst_iovcnt; i++) {
		dst_len += dst_iov[i].iov_len;
	}

	if (spdk_unlikely(remaining_len != dst_len || !remaining_len)) {
		return -ERANGE;
	}
	if (spdk_unlikely(remaining_len % accel_task->block_size != 0)) {
		return -EINVAL;
	}

	while (remaining_len) {
		crypto_len = spdk_min(block_size - crypto_accum_len, src_iov->iov_len - src_offset);
		crypto_len = spdk_min(crypto_len, dst_iov->iov_len - dst_offset);
		src = (uint8_t *)src_iov->iov_base + src_offset;
		dst = (uint8_t *)dst_iov->iov_base + dst_offset;

		op((uint8_t *)key->key2, (uint8_t *)key->key, (uint8_t *)iv, crypto_len, src, dst);

		src_offset += crypto_len;
		dst_offset += crypto_len;
		crypto_accum_len += crypto_len;
		remaining_len -= crypto_len;

		if (crypto_accum_len == block_size) {
			/* We may process only part of a logical block at a time; once the
			 * whole block has been processed, increment the IV. */
			crypto_accum_len = 0;
			iv[1]++;
		}
		if (src_offset == src_iov->iov_len) {
			src_iov++;
			src_iovpos++;
			src_offset = 0;
		}
		if (src_iovpos == src_iovcnt) {
			break;
		}
		if (dst_offset == dst_iov->iov_len) {
			dst_iov++;
			dst_iovpos++;
			dst_offset = 0;
		}
		if (dst_iovpos == dst_iovcnt) {
			break;
		}
	}

	if (remaining_len) {
		SPDK_ERRLOG("remaining len %zu\n", remaining_len);
		return -EINVAL;
	}

	return 0;
#else
	return -ENOTSUP;
#endif
}

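/* Validate that the task's crypto key belongs to this module and that the block
 * size is within the AES-XTS limit, then run the key's encrypt routine. */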
static int
_sw_accel_encrypt(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	struct spdk_accel_crypto_key *key;
	struct sw_accel_crypto_key_data *key_data;

	key = accel_task->crypto_key;
	if (spdk_unlikely(key->module_if != &g_sw_module || !key->priv)) {
		return -EINVAL;
	}
	if (spdk_unlikely(accel_task->block_size > ACCEL_AES_XTS_MAX_BLOCK_SIZE)) {
		SPDK_WARNLOG("Max block size for AES_XTS is limited to %u, current size %u\n",
			     ACCEL_AES_XTS_MAX_BLOCK_SIZE, accel_task->block_size);
		return -ERANGE;
	}
	key_data = key->priv;
	return _sw_accel_crypto_operation(accel_task, key, key_data->encrypt);
}

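/* Same validation as _sw_accel_encrypt(), dispatching to the key's decrypt routine. */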
static int
_sw_accel_decrypt(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	struct spdk_accel_crypto_key *key;
	struct sw_accel_crypto_key_data *key_data;

	key = accel_task->crypto_key;
	if (spdk_unlikely(key->module_if != &g_sw_module || !key->priv)) {
		return -EINVAL;
	}
	if (spdk_unlikely(accel_task->block_size > ACCEL_AES_XTS_MAX_BLOCK_SIZE)) {
		SPDK_WARNLOG("Max block size for AES_XTS is limited to %u, current size %u\n",
			     ACCEL_AES_XTS_MAX_BLOCK_SIZE, accel_task->block_size);
		return -ERANGE;
	}
	key_data = key->priv;
	return _sw_accel_crypto_operation(accel_task, key, key_data->decrypt);
}

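/* Execute every task in the submitted chain synchronously, queueing each one on
 * the channel's completion list; the completion poller fires the callbacks later. */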
static int
sw_accel_submit_tasks(struct spdk_io_channel *ch, struct spdk_accel_task *accel_task)
{
	struct sw_accel_io_channel *sw_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *tmp;
	int rc = 0;

	do {
		switch (accel_task->op_code) {
		case ACCEL_OPC_COPY:
			rc = _check_flags(accel_task->flags);
			if (rc == 0) {
				_sw_accel_copy_iovs(accel_task->d.iovs, accel_task->d.iovcnt,
						    accel_task->s.iovs, accel_task->s.iovcnt,
						    accel_task->flags);
			}
			break;
		case ACCEL_OPC_FILL:
			rc = _check_flags(accel_task->flags);
			if (rc == 0) {
				rc = _sw_accel_fill(accel_task->d.iovs, accel_task->d.iovcnt,
						    accel_task->fill_pattern, accel_task->flags);
			}
			break;
		case ACCEL_OPC_DUALCAST:
			rc = _check_flags(accel_task->flags);
			if (rc == 0) {
				rc = _sw_accel_dualcast_iovs(accel_task->d.iovs, accel_task->d.iovcnt,
							     accel_task->d2.iovs, accel_task->d2.iovcnt,
							     accel_task->s.iovs, accel_task->s.iovcnt,
							     accel_task->flags);
			}
			break;
		case ACCEL_OPC_COMPARE:
			rc = _sw_accel_compare(accel_task->s.iovs, accel_task->s.iovcnt,
					       accel_task->s2.iovs, accel_task->s2.iovcnt);
			break;
		case ACCEL_OPC_CRC32C:
			_sw_accel_crc32cv(accel_task->crc_dst, accel_task->s.iovs, accel_task->s.iovcnt, accel_task->seed);
			break;
		case ACCEL_OPC_COPY_CRC32C:
			rc = _check_flags(accel_task->flags);
			if (rc == 0) {
				_sw_accel_copy_iovs(accel_task->d.iovs, accel_task->d.iovcnt,
						    accel_task->s.iovs, accel_task->s.iovcnt,
						    accel_task->flags);
				_sw_accel_crc32cv(accel_task->crc_dst, accel_task->s.iovs,
						  accel_task->s.iovcnt, accel_task->seed);
			}
			break;
		case ACCEL_OPC_COMPRESS:
			rc = _sw_accel_compress(sw_ch, accel_task);
			break;
		case ACCEL_OPC_DECOMPRESS:
			rc = _sw_accel_decompress(sw_ch, accel_task);
			break;
		case ACCEL_OPC_ENCRYPT:
			rc = _sw_accel_encrypt(sw_ch, accel_task);
			break;
		case ACCEL_OPC_DECRYPT:
			rc = _sw_accel_decrypt(sw_ch, accel_task);
			break;
		default:
			assert(false);
			break;
		}

		tmp = TAILQ_NEXT(accel_task, link);

		_add_to_comp_list(sw_ch, accel_task, rc);

		accel_task = tmp;
	} while (accel_task);

	return 0;
}

static struct spdk_io_channel *sw_accel_get_io_channel(void);
static int sw_accel_module_init(void);
static void sw_accel_module_fini(void *ctxt);
static size_t sw_accel_module_get_ctx_size(void);

static struct spdk_accel_module_if g_sw_module = {
	.module_init		= sw_accel_module_init,
	.module_fini		= sw_accel_module_fini,
	.write_config_json	= NULL,
	.get_ctx_size		= sw_accel_module_get_ctx_size,
	.name			= "software",
	.supports_opcode	= sw_accel_supports_opcode,
	.get_io_channel		= sw_accel_get_io_channel,
	.submit_tasks		= sw_accel_submit_tasks,
	.crypto_key_init	= sw_accel_crypto_key_init,
	.crypto_key_deinit	= sw_accel_crypto_key_deinit,
};

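/* Completion poller: swap the pending completions onto a local list first, so
 * callbacks that submit new tasks (and thus append to the channel's list) cannot
 * disturb the iteration. */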
static int
accel_comp_poll(void *arg)
{
	struct sw_accel_io_channel	*sw_ch = arg;
	TAILQ_HEAD(, spdk_accel_task)	tasks_to_complete;
	struct spdk_accel_task		*accel_task;

	if (TAILQ_EMPTY(&sw_ch->tasks_to_complete)) {
		return SPDK_POLLER_IDLE;
	}

	TAILQ_INIT(&tasks_to_complete);
	TAILQ_SWAP(&tasks_to_complete, &sw_ch->tasks_to_complete, spdk_accel_task, link);

	while ((accel_task = TAILQ_FIRST(&tasks_to_complete))) {
		TAILQ_REMOVE(&tasks_to_complete, accel_task, link);
		spdk_accel_task_complete(accel_task, accel_task->status);
	}

	return SPDK_POLLER_BUSY;
}

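/* Per-channel constructor: register the completion poller and, when ISA-L is
 * available, set up a level-1 deflate stream (with its level buffer) and an
 * inflate state. */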
static int
sw_accel_create_cb(void *io_device, void *ctx_buf)
{
	struct sw_accel_io_channel *sw_ch = ctx_buf;

	TAILQ_INIT(&sw_ch->tasks_to_complete);
	sw_ch->completion_poller = SPDK_POLLER_REGISTER(accel_comp_poll, sw_ch, 0);

#ifdef SPDK_CONFIG_ISAL
	isal_deflate_init(&sw_ch->stream);
	sw_ch->stream.flush = NO_FLUSH;
	sw_ch->stream.level = 1;
	sw_ch->stream.level_buf = calloc(1, ISAL_DEF_LVL1_DEFAULT);
	if (sw_ch->stream.level_buf == NULL) {
		SPDK_ERRLOG("Could not allocate isal internal buffer\n");
		return -ENOMEM;
	}
	sw_ch->stream.level_buf_size = ISAL_DEF_LVL1_DEFAULT;
	isal_inflate_init(&sw_ch->state);
#endif

	return 0;
}

static void
sw_accel_destroy_cb(void *io_device, void *ctx_buf)
{
	struct sw_accel_io_channel *sw_ch = ctx_buf;

#ifdef SPDK_CONFIG_ISAL
	free(sw_ch->stream.level_buf);
#endif

	spdk_poller_unregister(&sw_ch->completion_poller);
}

static struct spdk_io_channel *
sw_accel_get_io_channel(void)
{
	return spdk_get_io_channel(&g_sw_module);
}

static size_t
sw_accel_module_get_ctx_size(void)
{
	return sizeof(struct spdk_accel_task);
}

static int
sw_accel_module_init(void)
{
	SPDK_NOTICELOG("Accel framework software module initialized.\n");
	spdk_io_device_register(&g_sw_module, sw_accel_create_cb, sw_accel_destroy_cb,
				sizeof(struct sw_accel_io_channel), "sw_accel_module");

	return 0;
}

static void
sw_accel_module_fini(void *ctxt)
{
	spdk_io_device_unregister(&g_sw_module, NULL);
	spdk_accel_module_finish();
}

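/* Validate the AES-XTS key pair and bind the ISA-L XTS_AES_128/256 encrypt and
 * decrypt routines that match the key size. */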
static int
sw_accel_create_aes_xts(struct spdk_accel_crypto_key *key)
{
#ifdef SPDK_CONFIG_ISAL_CRYPTO
	struct sw_accel_crypto_key_data *key_data;

	if (!key->key || !key->key2) {
		SPDK_ERRLOG("key or key2 is missing\n");
		return -EINVAL;
	}

	if (!key->key_size || key->key_size != key->key2_size) {
		SPDK_ERRLOG("key size %zu is not equal to key2 size %zu or is 0\n", key->key_size,
			    key->key2_size);
		return -EINVAL;
	}

	key_data = calloc(1, sizeof(*key_data));
	if (!key_data) {
		return -ENOMEM;
	}

	switch (key->key_size) {
	case ACCEL_AES_XTS_128_KEY_SIZE:
		key_data->encrypt = XTS_AES_128_enc;
		key_data->decrypt = XTS_AES_128_dec;
		break;
	case ACCEL_AES_XTS_256_KEY_SIZE:
		key_data->encrypt = XTS_AES_256_enc;
		key_data->decrypt = XTS_AES_256_dec;
		break;
	default:
		SPDK_ERRLOG("Incorrect key size %zu, should be %d for AES_XTS_128 or %d for AES_XTS_256\n",
			    key->key_size, ACCEL_AES_XTS_128_KEY_SIZE, ACCEL_AES_XTS_256_KEY_SIZE);
		free(key_data);
		return -EINVAL;
	}

	key->priv = key_data;

	return 0;
#else
	return -ENOTSUP;
#endif
}

static int
sw_accel_crypto_key_init(struct spdk_accel_crypto_key *key)
{
	if (!key || !key->param.cipher) {
		return -EINVAL;
	}
	if (strcmp(key->param.cipher, ACCEL_AES_XTS) == 0) {
		return sw_accel_create_aes_xts(key);
	} else {
		SPDK_ERRLOG("Only %s cipher is supported\n", ACCEL_AES_XTS);
		return -EINVAL;
	}
}

static void
sw_accel_crypto_key_deinit(struct spdk_accel_crypto_key *key)
{
	if (!key || key->module_if != &g_sw_module || !key->priv) {
		return;
	}

	free(key->priv);
}

SPDK_ACCEL_MODULE_REGISTER(sw, &g_sw_module)