/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2022 Intel Corporation.
 * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES
 * All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/accel_module.h"
#include "accel_internal.h"

#include "spdk/env.h"
#include "spdk/likely.h"
#include "spdk/log.h"
#include "spdk/thread.h"
#include "spdk/json.h"
#include "spdk/crc32.h"
#include "spdk/util.h"
#include "spdk/xor.h"

#ifdef SPDK_CONFIG_ISAL
#include "../isa-l/include/igzip_lib.h"
#ifdef SPDK_CONFIG_ISAL_CRYPTO
#include "../isa-l-crypto/include/aes_xts.h"
#endif
#endif

/* Per the AES-XTS spec, the size of data unit cannot be bigger than 2^20 blocks, 128b each block */
#define ACCEL_AES_XTS_MAX_BLOCK_SIZE (1 << 24)

struct sw_accel_io_channel {
	/* for ISAL */
#ifdef SPDK_CONFIG_ISAL
	struct isal_zstream stream;
	struct inflate_state state;
#endif
	struct spdk_poller *completion_poller;
	TAILQ_HEAD(, spdk_accel_task) tasks_to_complete;
};

typedef void (*sw_accel_crypto_op)(uint8_t *k2, uint8_t *k1, uint8_t *tweak, uint64_t lba_size,
				   const uint8_t *src, uint8_t *dst);

struct sw_accel_crypto_key_data {
	sw_accel_crypto_op encrypt;
	sw_accel_crypto_op decrypt;
};

static struct spdk_accel_module_if g_sw_module;

static void sw_accel_crypto_key_deinit(struct spdk_accel_crypto_key *_key);
static int sw_accel_crypto_key_init(struct spdk_accel_crypto_key *key);
static bool sw_accel_crypto_supports_tweak_mode(enum spdk_accel_crypto_tweak_mode tweak_mode);
static bool sw_accel_crypto_supports_cipher(enum spdk_accel_cipher cipher, size_t key_size);

/* Post SW completions to a list and complete in a poller as we don't want to
 * complete them on the caller's stack as they'll likely submit another. */
inline static void
_add_to_comp_list(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task, int status)
{
	accel_task->status = status;
	TAILQ_INSERT_TAIL(&sw_ch->tasks_to_complete, accel_task, link);
}

static bool
sw_accel_supports_opcode(enum spdk_accel_opcode opc)
{
	switch (opc) {
	case SPDK_ACCEL_OPC_COPY:
	case SPDK_ACCEL_OPC_FILL:
	case SPDK_ACCEL_OPC_DUALCAST:
	case SPDK_ACCEL_OPC_COMPARE:
	case SPDK_ACCEL_OPC_CRC32C:
	case SPDK_ACCEL_OPC_COPY_CRC32C:
	case SPDK_ACCEL_OPC_COMPRESS:
	case SPDK_ACCEL_OPC_DECOMPRESS:
	case SPDK_ACCEL_OPC_ENCRYPT:
	case SPDK_ACCEL_OPC_DECRYPT:
	case SPDK_ACCEL_OPC_XOR:
		return true;
	default:
		return false;
	}
}

static int
_sw_accel_dualcast_iovs(struct iovec *dst_iovs, uint32_t dst_iovcnt,
			struct iovec *dst2_iovs, uint32_t dst2_iovcnt,
			struct iovec *src_iovs, uint32_t src_iovcnt)
{
	if (spdk_unlikely(dst_iovcnt != 1 || dst2_iovcnt != 1 || src_iovcnt != 1)) {
		return -EINVAL;
	}

	if (spdk_unlikely(dst_iovs[0].iov_len != src_iovs[0].iov_len ||
			  dst_iovs[0].iov_len != dst2_iovs[0].iov_len)) {
		return -EINVAL;
	}

	memcpy(dst_iovs[0].iov_base, src_iovs[0].iov_base, dst_iovs[0].iov_len);
	memcpy(dst2_iovs[0].iov_base, src_iovs[0].iov_base, dst_iovs[0].iov_len);

	return 0;
}

static void
_sw_accel_copy_iovs(struct iovec *dst_iovs, uint32_t dst_iovcnt,
		    struct iovec *src_iovs, uint32_t src_iovcnt)
{
	struct spdk_ioviter iter;
	void *src, *dst;
	size_t len;

	for (len = spdk_ioviter_first(&iter, src_iovs, src_iovcnt,
				      dst_iovs, dst_iovcnt, &src, &dst);
	     len != 0;
	     len = spdk_ioviter_next(&iter, &src, &dst)) {
		memcpy(dst, src, len);
	}
}

static int
_sw_accel_compare(struct iovec *src_iovs, uint32_t src_iovcnt,
		  struct iovec *src2_iovs, uint32_t src2_iovcnt)
{
	if (spdk_unlikely(src_iovcnt != 1 || src2_iovcnt != 1)) {
		return -EINVAL;
	}

	if (spdk_unlikely(src_iovs[0].iov_len != src2_iovs[0].iov_len)) {
		return -EINVAL;
	}

	return memcmp(src_iovs[0].iov_base, src2_iovs[0].iov_base, src_iovs[0].iov_len);
}

static int
_sw_accel_fill(struct iovec *iovs, uint32_t iovcnt, uint8_t fill)
{
	void *dst;
	size_t nbytes;

	if (spdk_unlikely(iovcnt != 1)) {
		return -EINVAL;
	}

	dst = iovs[0].iov_base;
	nbytes = iovs[0].iov_len;

	memset(dst, fill, nbytes);

	return 0;
}

static void
_sw_accel_crc32cv(uint32_t *crc_dst, struct iovec *iov, uint32_t iovcnt, uint32_t seed)
{
	*crc_dst = spdk_crc32c_iov_update(iov, iovcnt, ~seed);
}

static int
_sw_accel_compress(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
#ifdef SPDK_CONFIG_ISAL
	size_t last_seglen = accel_task->s.iovs[accel_task->s.iovcnt - 1].iov_len;
	struct iovec *siov = accel_task->s.iovs;
	struct iovec *diov = accel_task->d.iovs;
	size_t remaining;
	uint32_t i, s = 0, d = 0;
	int rc = 0;

	remaining = 0;
	for (i = 0; i < accel_task->s.iovcnt; ++i) {
		remaining += accel_task->s.iovs[i].iov_len;
	}

	isal_deflate_reset(&sw_ch->stream);
	sw_ch->stream.end_of_stream = 0;
	sw_ch->stream.next_out = diov[d].iov_base;
	sw_ch->stream.avail_out = diov[d].iov_len;
	sw_ch->stream.next_in = siov[s].iov_base;
	sw_ch->stream.avail_in = siov[s].iov_len;

	do {
		/* if isal has exhausted the current dst iovec, move to the next
		 * one if there is one */
		if (sw_ch->stream.avail_out == 0) {
			if (++d < accel_task->d.iovcnt) {
				sw_ch->stream.next_out = diov[d].iov_base;
				sw_ch->stream.avail_out = diov[d].iov_len;
				assert(sw_ch->stream.avail_out > 0);
			} else {
				/* we have no avail_out but also no more iovecs left so this is
				 * the case where either the output buffer was a perfect fit
				 * or not enough was provided. Check the ISAL state to determine
				 * which. */
				if (sw_ch->stream.internal_state.state != ZSTATE_END) {
					SPDK_ERRLOG("Not enough destination buffer provided.\n");
					rc = -ENOMEM;
				}
				break;
			}
		}

		/* if isal has exhausted the current src iovec, move to the next
		 * one if there is one */
		if (sw_ch->stream.avail_in == 0 && ((s + 1) < accel_task->s.iovcnt)) {
			s++;
			sw_ch->stream.next_in = siov[s].iov_base;
			sw_ch->stream.avail_in = siov[s].iov_len;
			assert(sw_ch->stream.avail_in > 0);
		}

		if (remaining <= last_seglen) {
			/* Need to set end of stream on last block */
			sw_ch->stream.end_of_stream = 1;
		}

		rc = isal_deflate(&sw_ch->stream);
		if (rc) {
			SPDK_ERRLOG("isal_deflate returned error %d.\n", rc);
		}

		if (remaining > 0) {
			assert(siov[s].iov_len > sw_ch->stream.avail_in);
			remaining -= (siov[s].iov_len - sw_ch->stream.avail_in);
		}

	} while (remaining > 0 || sw_ch->stream.avail_out == 0);
	assert(sw_ch->stream.avail_in == 0);

	/* Get our total output size */
	if (accel_task->output_size != NULL) {
		assert(sw_ch->stream.total_out > 0);
		*accel_task->output_size = sw_ch->stream.total_out;
	}

	return rc;
#else
	SPDK_ERRLOG("ISAL option is required to use software compression.\n");
	return -EINVAL;
#endif
}

static int
_sw_accel_decompress(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
#ifdef SPDK_CONFIG_ISAL
	struct iovec *siov = accel_task->s.iovs;
	struct iovec *diov = accel_task->d.iovs;
	uint32_t s = 0, d = 0;
	int rc = 0;

	isal_inflate_reset(&sw_ch->state);
	sw_ch->state.next_out = diov[d].iov_base;
	sw_ch->state.avail_out = diov[d].iov_len;
	sw_ch->state.next_in = siov[s].iov_base;
	sw_ch->state.avail_in = siov[s].iov_len;

	do {
		/* if isal has exhausted the current dst iovec, move to the next
		 * one if there is one */
		if (sw_ch->state.avail_out == 0 && ((d + 1) < accel_task->d.iovcnt)) {
			d++;
			sw_ch->state.next_out = diov[d].iov_base;
			sw_ch->state.avail_out = diov[d].iov_len;
			assert(sw_ch->state.avail_out > 0);
		}

		/* if isal has exhausted the current src iovec, move to the next
		 * one if there is one */
		if (sw_ch->state.avail_in == 0 && ((s + 1) < accel_task->s.iovcnt)) {
			s++;
			sw_ch->state.next_in = siov[s].iov_base;
			sw_ch->state.avail_in = siov[s].iov_len;
			assert(sw_ch->state.avail_in > 0);
		}

		rc = isal_inflate(&sw_ch->state);
		if (rc) {
			SPDK_ERRLOG("isal_inflate returned error %d.\n", rc);
		}

	} while (sw_ch->state.block_state < ISAL_BLOCK_FINISH);
	assert(sw_ch->state.avail_in == 0);

	/* Get our total output size */
	if (accel_task->output_size != NULL) {
		assert(sw_ch->state.total_out > 0);
		*accel_task->output_size = sw_ch->state.total_out;
	}

	return rc;
#else
	SPDK_ERRLOG("ISAL option is required to use software decompression.\n");
	return -EINVAL;
#endif
}

static int
_sw_accel_crypto_operation(struct spdk_accel_task *accel_task, struct spdk_accel_crypto_key *key,
			   sw_accel_crypto_op op)
{
#ifdef SPDK_CONFIG_ISAL_CRYPTO
	uint64_t iv[2];
	size_t remaining_len, dst_len;
	uint64_t src_offset = 0, dst_offset = 0;
	uint32_t src_iovpos = 0, dst_iovpos = 0, src_iovcnt, dst_iovcnt;
	uint32_t i, block_size, crypto_len, crypto_accum_len = 0;
	struct iovec *src_iov, *dst_iov;
	uint8_t *src, *dst;

	/* iv is 128 bits, since we are using logical block address (64 bits) as iv, fill first 8 bytes with zeroes */
	iv[0] = 0;
	iv[1] = accel_task->iv;
	src_iov = accel_task->s.iovs;
	src_iovcnt = accel_task->s.iovcnt;
	if (accel_task->d.iovcnt) {
		dst_iov = accel_task->d.iovs;
		dst_iovcnt = accel_task->d.iovcnt;
	} else {
		/* inplace operation */
		dst_iov = accel_task->s.iovs;
		dst_iovcnt = accel_task->s.iovcnt;
	}
	block_size = accel_task->block_size;

	if (!src_iovcnt || !dst_iovcnt || !block_size || !op) {
		SPDK_ERRLOG("src_iovcnt %d, dst_iovcnt %d, block_size %d, op %p\n", src_iovcnt, dst_iovcnt,
			    block_size, op);
		return -EINVAL;
	}

	remaining_len = 0;
	for (i = 0; i < src_iovcnt; i++) {
		remaining_len += src_iov[i].iov_len;
	}
	dst_len = 0;
	for (i = 0; i < dst_iovcnt; i++) {
		dst_len += dst_iov[i].iov_len;
	}

	if (spdk_unlikely(remaining_len != dst_len || !remaining_len)) {
		return -ERANGE;
	}
	if (spdk_unlikely(remaining_len % accel_task->block_size != 0)) {
		return -EINVAL;
	}

	while (remaining_len) {
		crypto_len = spdk_min(block_size - crypto_accum_len, src_iov->iov_len - src_offset);
		crypto_len = spdk_min(crypto_len, dst_iov->iov_len - dst_offset);
		src = (uint8_t *)src_iov->iov_base + src_offset;
		dst = (uint8_t *)dst_iov->iov_base + dst_offset;

		op((uint8_t *)key->key2, (uint8_t *)key->key, (uint8_t *)iv, crypto_len, src, dst);

		src_offset += crypto_len;
		dst_offset += crypto_len;
		crypto_accum_len += crypto_len;
		remaining_len -= crypto_len;

		if (crypto_accum_len == block_size) {
			/* we can process part of logical block. Once the whole block is processed, increment iv */
			crypto_accum_len = 0;
			iv[1]++;
		}
		if (src_offset == src_iov->iov_len) {
			src_iov++;
			src_iovpos++;
			src_offset = 0;
		}
		if (src_iovpos == src_iovcnt) {
			break;
		}
		if (dst_offset == dst_iov->iov_len) {
			dst_iov++;
			dst_iovpos++;
			dst_offset = 0;
		}
		if (dst_iovpos == dst_iovcnt) {
			break;
		}
	}

	if (remaining_len) {
		SPDK_ERRLOG("remaining len %zu\n", remaining_len);
		return -EINVAL;
	}

	return 0;
#else
	return -ENOTSUP;
#endif
}

static int
_sw_accel_encrypt(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	struct spdk_accel_crypto_key *key;
	struct sw_accel_crypto_key_data *key_data;

	key = accel_task->crypto_key;
	if (spdk_unlikely(key->module_if != &g_sw_module || !key->priv)) {
		return -EINVAL;
	}
	if (spdk_unlikely(accel_task->block_size > ACCEL_AES_XTS_MAX_BLOCK_SIZE)) {
		SPDK_WARNLOG("Max block size for AES_XTS is limited to %u, current size %u\n",
			     ACCEL_AES_XTS_MAX_BLOCK_SIZE, accel_task->block_size);
		return -ERANGE;
	}
	key_data = key->priv;
	return _sw_accel_crypto_operation(accel_task, key, key_data->encrypt);
}

static int
_sw_accel_decrypt(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	struct spdk_accel_crypto_key *key;
	struct sw_accel_crypto_key_data *key_data;

	key = accel_task->crypto_key;
	if (spdk_unlikely(key->module_if != &g_sw_module || !key->priv)) {
		return -EINVAL;
	}
	if (spdk_unlikely(accel_task->block_size > ACCEL_AES_XTS_MAX_BLOCK_SIZE)) {
		SPDK_WARNLOG("Max block size for AES_XTS is limited to %u, current size %u\n",
			     ACCEL_AES_XTS_MAX_BLOCK_SIZE, accel_task->block_size);
		return -ERANGE;
	}
	key_data = key->priv;
	return _sw_accel_crypto_operation(accel_task, key, key_data->decrypt);
}

static int
_sw_accel_xor(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	return spdk_xor_gen(accel_task->d.iovs[0].iov_base,
			    accel_task->nsrcs.srcs,
			    accel_task->nsrcs.cnt,
			    accel_task->d.iovs[0].iov_len);
}

static int
sw_accel_submit_tasks(struct spdk_io_channel *ch, struct spdk_accel_task *accel_task)
{
	struct sw_accel_io_channel *sw_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *tmp;
	int rc = 0;

	do {
		switch (accel_task->op_code) {
		case SPDK_ACCEL_OPC_COPY:
			_sw_accel_copy_iovs(accel_task->d.iovs, accel_task->d.iovcnt,
					    accel_task->s.iovs, accel_task->s.iovcnt);
			break;
		case SPDK_ACCEL_OPC_FILL:
			rc = _sw_accel_fill(accel_task->d.iovs, accel_task->d.iovcnt,
					    accel_task->fill_pattern);
			break;
		case SPDK_ACCEL_OPC_DUALCAST:
			rc = _sw_accel_dualcast_iovs(accel_task->d.iovs, accel_task->d.iovcnt,
						     accel_task->d2.iovs, accel_task->d2.iovcnt,
						     accel_task->s.iovs, accel_task->s.iovcnt);
			break;
		case SPDK_ACCEL_OPC_COMPARE:
			rc = _sw_accel_compare(accel_task->s.iovs, accel_task->s.iovcnt,
					       accel_task->s2.iovs, accel_task->s2.iovcnt);
			break;
		case SPDK_ACCEL_OPC_CRC32C:
			_sw_accel_crc32cv(accel_task->crc_dst, accel_task->s.iovs, accel_task->s.iovcnt, accel_task->seed);
			break;
		case SPDK_ACCEL_OPC_COPY_CRC32C:
			_sw_accel_copy_iovs(accel_task->d.iovs, accel_task->d.iovcnt,
					    accel_task->s.iovs, accel_task->s.iovcnt);
			_sw_accel_crc32cv(accel_task->crc_dst, accel_task->s.iovs,
					  accel_task->s.iovcnt, accel_task->seed);
			break;
		case SPDK_ACCEL_OPC_COMPRESS:
			rc = _sw_accel_compress(sw_ch, accel_task);
			break;
		case SPDK_ACCEL_OPC_DECOMPRESS:
			rc = _sw_accel_decompress(sw_ch, accel_task);
			break;
		case SPDK_ACCEL_OPC_XOR:
			rc = _sw_accel_xor(sw_ch, accel_task);
			break;
		case SPDK_ACCEL_OPC_ENCRYPT:
			rc = _sw_accel_encrypt(sw_ch, accel_task);
			break;
		case SPDK_ACCEL_OPC_DECRYPT:
			rc = _sw_accel_decrypt(sw_ch, accel_task);
			break;
		default:
			assert(false);
			break;
		}

		tmp = TAILQ_NEXT(accel_task, link);

		_add_to_comp_list(sw_ch, accel_task, rc);

		accel_task = tmp;
	} while (accel_task);

	return 0;
}

static int
accel_comp_poll(void *arg)
{
	struct sw_accel_io_channel *sw_ch = arg;
	TAILQ_HEAD(, spdk_accel_task) tasks_to_complete;
	struct spdk_accel_task *accel_task;

	if (TAILQ_EMPTY(&sw_ch->tasks_to_complete)) {
		return SPDK_POLLER_IDLE;
	}

	TAILQ_INIT(&tasks_to_complete);
	TAILQ_SWAP(&tasks_to_complete, &sw_ch->tasks_to_complete, spdk_accel_task, link);

	while ((accel_task = TAILQ_FIRST(&tasks_to_complete))) {
		TAILQ_REMOVE(&tasks_to_complete, accel_task, link);
		spdk_accel_task_complete(accel_task, accel_task->status);
	}

	return SPDK_POLLER_BUSY;
}

static int
sw_accel_create_cb(void *io_device, void *ctx_buf)
{
	struct sw_accel_io_channel *sw_ch = ctx_buf;

	TAILQ_INIT(&sw_ch->tasks_to_complete);
	sw_ch->completion_poller = SPDK_POLLER_REGISTER(accel_comp_poll, sw_ch, 0);

#ifdef SPDK_CONFIG_ISAL
	isal_deflate_init(&sw_ch->stream);
	sw_ch->stream.flush = NO_FLUSH;
	sw_ch->stream.level = 1;
	sw_ch->stream.level_buf = calloc(1, ISAL_DEF_LVL1_DEFAULT);
	if (sw_ch->stream.level_buf == NULL) {
		SPDK_ERRLOG("Could not allocate isal internal buffer\n");
		return -ENOMEM;
	}
	sw_ch->stream.level_buf_size = ISAL_DEF_LVL1_DEFAULT;
	isal_inflate_init(&sw_ch->state);
#endif

	return 0;
}

static void
sw_accel_destroy_cb(void *io_device, void *ctx_buf)
{
	struct sw_accel_io_channel *sw_ch = ctx_buf;

#ifdef SPDK_CONFIG_ISAL
	free(sw_ch->stream.level_buf);
#endif

	spdk_poller_unregister(&sw_ch->completion_poller);
}

static struct spdk_io_channel *
sw_accel_get_io_channel(void)
{
	return spdk_get_io_channel(&g_sw_module);
}

static size_t
sw_accel_module_get_ctx_size(void)
{
	return sizeof(struct spdk_accel_task);
}

static int
sw_accel_module_init(void)
{
	spdk_io_device_register(&g_sw_module, sw_accel_create_cb, sw_accel_destroy_cb,
				sizeof(struct sw_accel_io_channel), "sw_accel_module");

	return 0;
}

static void
sw_accel_module_fini(void *ctxt)
{
	spdk_io_device_unregister(&g_sw_module, NULL);
	spdk_accel_module_finish();
}

static int
sw_accel_create_aes_xts(struct spdk_accel_crypto_key *key)
{
#ifdef SPDK_CONFIG_ISAL_CRYPTO
	struct sw_accel_crypto_key_data *key_data;

	key_data = calloc(1, sizeof(*key_data));
	if (!key_data) {
		return -ENOMEM;
	}

	switch (key->key_size) {
	case SPDK_ACCEL_AES_XTS_128_KEY_SIZE:
		key_data->encrypt = XTS_AES_128_enc;
		key_data->decrypt = XTS_AES_128_dec;
		break;
	case SPDK_ACCEL_AES_XTS_256_KEY_SIZE:
		key_data->encrypt = XTS_AES_256_enc;
		key_data->decrypt = XTS_AES_256_dec;
		break;
	default:
		assert(0);
		free(key_data);
		return -EINVAL;
	}

	key->priv = key_data;

	return 0;
#else
	return -ENOTSUP;
#endif
}

static int
sw_accel_crypto_key_init(struct spdk_accel_crypto_key *key)
{
	return sw_accel_create_aes_xts(key);
}

static void
sw_accel_crypto_key_deinit(struct spdk_accel_crypto_key *key)
{
	if (!key || key->module_if != &g_sw_module || !key->priv) {
		return;
	}

	free(key->priv);
}

static bool
sw_accel_crypto_supports_tweak_mode(enum spdk_accel_crypto_tweak_mode tweak_mode)
{
	return tweak_mode == SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA;
}

static bool
sw_accel_crypto_supports_cipher(enum spdk_accel_cipher cipher, size_t key_size)
{
	switch (cipher) {
	case SPDK_ACCEL_CIPHER_AES_XTS:
		return key_size == SPDK_ACCEL_AES_XTS_128_KEY_SIZE || key_size == SPDK_ACCEL_AES_XTS_256_KEY_SIZE;
	default:
		return false;
	}
}

static int
sw_accel_get_operation_info(enum spdk_accel_opcode opcode,
			    const struct spdk_accel_operation_exec_ctx *ctx,
			    struct spdk_accel_opcode_info *info)
{
	info->required_alignment = 0;

	return 0;
}

static struct spdk_accel_module_if g_sw_module = {
	.module_init = sw_accel_module_init,
	.module_fini = sw_accel_module_fini,
	.write_config_json = NULL,
	.get_ctx_size = sw_accel_module_get_ctx_size,
	.name = "software",
	.supports_opcode = sw_accel_supports_opcode,
	.get_io_channel = sw_accel_get_io_channel,
	.submit_tasks = sw_accel_submit_tasks,
	.crypto_key_init = sw_accel_crypto_key_init,
	.crypto_key_deinit = sw_accel_crypto_key_deinit,
	.crypto_supports_tweak_mode = sw_accel_crypto_supports_tweak_mode,
	.crypto_supports_cipher = sw_accel_crypto_supports_cipher,
	.get_operation_info = sw_accel_get_operation_info,
};

SPDK_ACCEL_MODULE_REGISTER(sw, &g_sw_module)