1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (C) 2020 Intel Corporation. 3 * Copyright (c) 2022, 2023 NVIDIA CORPORATION & AFFILIATES. 4 * All rights reserved. 5 */ 6 7 #include "spdk/stdinc.h" 8 9 #include "spdk/accel_module.h" 10 11 #include "accel_internal.h" 12 13 #include "spdk/dma.h" 14 #include "spdk/env.h" 15 #include "spdk/likely.h" 16 #include "spdk/log.h" 17 #include "spdk/thread.h" 18 #include "spdk/json.h" 19 #include "spdk/crc32.h" 20 #include "spdk/util.h" 21 #include "spdk/hexlify.h" 22 #include "spdk/string.h" 23 24 /* Accelerator Framework: The following provides a top level 25 * generic API for the accelerator functions defined here. Modules, 26 * such as the one in /module/accel/ioat, supply the implementation 27 * with the exception of the pure software implementation contained 28 * later in this file. 29 */ 30 31 #define ALIGN_4K 0x1000 32 #define MAX_TASKS_PER_CHANNEL 0x800 33 #define ACCEL_SMALL_CACHE_SIZE 128 34 #define ACCEL_LARGE_CACHE_SIZE 16 35 /* Set MSB, so we don't return NULL pointers as buffers */ 36 #define ACCEL_BUFFER_BASE ((void *)(1ull << 63)) 37 #define ACCEL_BUFFER_OFFSET_MASK ((uintptr_t)ACCEL_BUFFER_BASE - 1) 38 39 #define ACCEL_CRYPTO_TWEAK_MODE_DEFAULT SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA 40 41 struct accel_module { 42 struct spdk_accel_module_if *module; 43 bool supports_memory_domains; 44 }; 45 46 /* Largest context size for all accel modules */ 47 static size_t g_max_accel_module_size = sizeof(struct spdk_accel_task); 48 49 static struct spdk_accel_module_if *g_accel_module = NULL; 50 static spdk_accel_fini_cb g_fini_cb_fn = NULL; 51 static void *g_fini_cb_arg = NULL; 52 static bool g_modules_started = false; 53 static struct spdk_memory_domain *g_accel_domain; 54 55 /* Global list of registered accelerator modules */ 56 static TAILQ_HEAD(, spdk_accel_module_if) spdk_accel_module_list = 57 TAILQ_HEAD_INITIALIZER(spdk_accel_module_list); 58 59 /* Crypto keyring */ 60 static TAILQ_HEAD(, spdk_accel_crypto_key) g_keyring = TAILQ_HEAD_INITIALIZER(g_keyring); 61 static struct spdk_spinlock g_keyring_spin; 62 63 /* Global array mapping capabilities to modules */ 64 static struct accel_module g_modules_opc[SPDK_ACCEL_OPC_LAST] = {}; 65 static char *g_modules_opc_override[SPDK_ACCEL_OPC_LAST] = {}; 66 TAILQ_HEAD(, spdk_accel_driver) g_accel_drivers = TAILQ_HEAD_INITIALIZER(g_accel_drivers); 67 static struct spdk_accel_driver *g_accel_driver; 68 static struct spdk_accel_opts g_opts = { 69 .small_cache_size = ACCEL_SMALL_CACHE_SIZE, 70 .large_cache_size = ACCEL_LARGE_CACHE_SIZE, 71 .task_count = MAX_TASKS_PER_CHANNEL, 72 .sequence_count = MAX_TASKS_PER_CHANNEL, 73 .buf_count = MAX_TASKS_PER_CHANNEL, 74 }; 75 static struct accel_stats g_stats; 76 static struct spdk_spinlock g_stats_lock; 77 78 static const char *g_opcode_strings[SPDK_ACCEL_OPC_LAST] = { 79 "copy", "fill", "dualcast", "compare", "crc32c", "copy_crc32c", 80 "compress", "decompress", "encrypt", "decrypt", "xor", 81 "dif_verify", "dif_verify_copy", "dif_generate", "dif_generate_copy" 82 }; 83 84 enum accel_sequence_state { 85 ACCEL_SEQUENCE_STATE_INIT, 86 ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF, 87 ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF, 88 ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF, 89 ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF, 90 ACCEL_SEQUENCE_STATE_PULL_DATA, 91 ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA, 92 ACCEL_SEQUENCE_STATE_EXEC_TASK, 93 ACCEL_SEQUENCE_STATE_AWAIT_TASK, 94 ACCEL_SEQUENCE_STATE_COMPLETE_TASK, 95 ACCEL_SEQUENCE_STATE_NEXT_TASK, 96 ACCEL_SEQUENCE_STATE_PUSH_DATA, 97 ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA, 98 ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS, 99 ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS, 100 ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS, 101 ACCEL_SEQUENCE_STATE_ERROR, 102 ACCEL_SEQUENCE_STATE_MAX, 103 }; 104 105 static const char *g_seq_states[] 106 __attribute__((unused)) = { 107 [ACCEL_SEQUENCE_STATE_INIT] = "init", 108 [ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF] = "check-virtbuf", 109 [ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF] = "await-virtbuf", 110 [ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF] = "check-bouncebuf", 111 [ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF] = "await-bouncebuf", 112 [ACCEL_SEQUENCE_STATE_PULL_DATA] = "pull-data", 113 [ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA] = "await-pull-data", 114 [ACCEL_SEQUENCE_STATE_EXEC_TASK] = "exec-task", 115 [ACCEL_SEQUENCE_STATE_AWAIT_TASK] = "await-task", 116 [ACCEL_SEQUENCE_STATE_COMPLETE_TASK] = "complete-task", 117 [ACCEL_SEQUENCE_STATE_NEXT_TASK] = "next-task", 118 [ACCEL_SEQUENCE_STATE_PUSH_DATA] = "push-data", 119 [ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA] = "await-push-data", 120 [ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS] = "driver-exec-tasks", 121 [ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS] = "driver-await-tasks", 122 [ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS] = "driver-complete-tasks", 123 [ACCEL_SEQUENCE_STATE_ERROR] = "error", 124 [ACCEL_SEQUENCE_STATE_MAX] = "", 125 }; 126 127 #define ACCEL_SEQUENCE_STATE_STRING(s) \ 128 (((s) >= ACCEL_SEQUENCE_STATE_INIT && (s) < ACCEL_SEQUENCE_STATE_MAX) \ 129 ? g_seq_states[s] : "unknown") 130 131 struct accel_buffer { 132 struct spdk_accel_sequence *seq; 133 void *buf; 134 uint64_t len; 135 struct spdk_iobuf_entry iobuf; 136 spdk_accel_sequence_get_buf_cb cb_fn; 137 void *cb_ctx; 138 SLIST_ENTRY(accel_buffer) link; 139 struct accel_io_channel *ch; 140 }; 141 142 struct accel_io_channel { 143 struct spdk_io_channel *module_ch[SPDK_ACCEL_OPC_LAST]; 144 struct spdk_io_channel *driver_channel; 145 void *task_pool_base; 146 struct spdk_accel_sequence *seq_pool_base; 147 struct accel_buffer *buf_pool_base; 148 struct spdk_accel_task_aux_data *task_aux_data_base; 149 STAILQ_HEAD(, spdk_accel_task) task_pool; 150 SLIST_HEAD(, spdk_accel_task_aux_data) task_aux_data_pool; 151 SLIST_HEAD(, spdk_accel_sequence) seq_pool; 152 SLIST_HEAD(, accel_buffer) buf_pool; 153 struct spdk_iobuf_channel iobuf; 154 struct accel_stats stats; 155 }; 156 157 TAILQ_HEAD(accel_sequence_tasks, spdk_accel_task); 158 159 struct spdk_accel_sequence { 160 struct accel_io_channel *ch; 161 struct accel_sequence_tasks tasks; 162 SLIST_HEAD(, accel_buffer) bounce_bufs; 163 int status; 164 /* state uses enum accel_sequence_state */ 165 uint8_t state; 166 bool in_process_sequence; 167 spdk_accel_completion_cb cb_fn; 168 void *cb_arg; 169 SLIST_ENTRY(spdk_accel_sequence) link; 170 }; 171 SPDK_STATIC_ASSERT(sizeof(struct spdk_accel_sequence) == 64, "invalid size"); 172 173 #define accel_update_stats(ch, event, v) \ 174 do { \ 175 (ch)->stats.event += (v); \ 176 } while (0) 177 178 #define accel_update_task_stats(ch, task, event, v) \ 179 accel_update_stats(ch, operations[(task)->op_code].event, v) 180 181 static inline void accel_sequence_task_cb(void *cb_arg, int status); 182 183 static inline void 184 accel_sequence_set_state(struct spdk_accel_sequence *seq, enum accel_sequence_state state) 185 { 186 SPDK_DEBUGLOG(accel, "seq=%p, setting state: %s -> %s\n", seq, 187 ACCEL_SEQUENCE_STATE_STRING(seq->state), ACCEL_SEQUENCE_STATE_STRING(state)); 188 assert(seq->state != ACCEL_SEQUENCE_STATE_ERROR || state == ACCEL_SEQUENCE_STATE_ERROR); 189 seq->state = state; 190 } 191 192 static void 193 accel_sequence_set_fail(struct spdk_accel_sequence *seq, int status) 194 { 195 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR); 196 assert(status != 0); 197 seq->status = status; 198 } 199 200 int 201 spdk_accel_get_opc_module_name(enum spdk_accel_opcode opcode, const char **module_name) 202 { 203 if (opcode >= SPDK_ACCEL_OPC_LAST) { 204 /* invalid opcode */ 205 return -EINVAL; 206 } 207 208 if (g_modules_opc[opcode].module) { 209 *module_name = g_modules_opc[opcode].module->name; 210 } else { 211 return -ENOENT; 212 } 213 214 return 0; 215 } 216 217 void 218 _accel_for_each_module(struct module_info *info, _accel_for_each_module_fn fn) 219 { 220 struct spdk_accel_module_if *accel_module; 221 enum spdk_accel_opcode opcode; 222 int j = 0; 223 224 TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) { 225 for (opcode = 0; opcode < SPDK_ACCEL_OPC_LAST; opcode++) { 226 if (accel_module->supports_opcode(opcode)) { 227 info->ops[j] = opcode; 228 j++; 229 } 230 } 231 info->name = accel_module->name; 232 info->num_ops = j; 233 fn(info); 234 j = 0; 235 } 236 } 237 238 const char * 239 spdk_accel_get_opcode_name(enum spdk_accel_opcode opcode) 240 { 241 if (opcode < SPDK_ACCEL_OPC_LAST) { 242 return g_opcode_strings[opcode]; 243 } 244 245 return NULL; 246 } 247 248 int 249 spdk_accel_assign_opc(enum spdk_accel_opcode opcode, const char *name) 250 { 251 char *copy; 252 253 if (g_modules_started == true) { 254 /* we don't allow re-assignment once things have started */ 255 return -EINVAL; 256 } 257 258 if (opcode >= SPDK_ACCEL_OPC_LAST) { 259 /* invalid opcode */ 260 return -EINVAL; 261 } 262 263 copy = strdup(name); 264 if (copy == NULL) { 265 return -ENOMEM; 266 } 267 268 /* module selection will be validated after the framework starts. */ 269 free(g_modules_opc_override[opcode]); 270 g_modules_opc_override[opcode] = copy; 271 272 return 0; 273 } 274 275 inline static struct spdk_accel_task * 276 _get_task(struct accel_io_channel *accel_ch, spdk_accel_completion_cb cb_fn, void *cb_arg) 277 { 278 struct spdk_accel_task *accel_task; 279 280 accel_task = STAILQ_FIRST(&accel_ch->task_pool); 281 if (spdk_unlikely(accel_task == NULL)) { 282 accel_update_stats(accel_ch, retry.task, 1); 283 return NULL; 284 } 285 286 STAILQ_REMOVE_HEAD(&accel_ch->task_pool, link); 287 accel_task->link.stqe_next = NULL; 288 289 accel_task->cb_fn = cb_fn; 290 accel_task->cb_arg = cb_arg; 291 accel_task->accel_ch = accel_ch; 292 accel_task->s.iovs = NULL; 293 accel_task->d.iovs = NULL; 294 295 return accel_task; 296 } 297 298 static void 299 _put_task(struct accel_io_channel *ch, struct spdk_accel_task *task) 300 { 301 STAILQ_INSERT_HEAD(&ch->task_pool, task, link); 302 } 303 304 void 305 spdk_accel_task_complete(struct spdk_accel_task *accel_task, int status) 306 { 307 struct accel_io_channel *accel_ch = accel_task->accel_ch; 308 spdk_accel_completion_cb cb_fn; 309 void *cb_arg; 310 311 accel_update_task_stats(accel_ch, accel_task, executed, 1); 312 accel_update_task_stats(accel_ch, accel_task, num_bytes, accel_task->nbytes); 313 if (spdk_unlikely(status != 0)) { 314 accel_update_task_stats(accel_ch, accel_task, failed, 1); 315 } 316 317 if (accel_task->seq) { 318 accel_sequence_task_cb(accel_task->seq, status); 319 return; 320 } 321 322 cb_fn = accel_task->cb_fn; 323 cb_arg = accel_task->cb_arg; 324 325 if (accel_task->has_aux) { 326 SLIST_INSERT_HEAD(&accel_ch->task_aux_data_pool, accel_task->aux, link); 327 accel_task->aux = NULL; 328 accel_task->has_aux = false; 329 } 330 331 /* We should put the accel_task into the list firstly in order to avoid 332 * the accel task list is exhausted when there is recursive call to 333 * allocate accel_task in user's call back function (cb_fn) 334 */ 335 _put_task(accel_ch, accel_task); 336 337 cb_fn(cb_arg, status); 338 } 339 340 static inline int 341 accel_submit_task(struct accel_io_channel *accel_ch, struct spdk_accel_task *task) 342 { 343 struct spdk_io_channel *module_ch = accel_ch->module_ch[task->op_code]; 344 struct spdk_accel_module_if *module = g_modules_opc[task->op_code].module; 345 int rc; 346 347 rc = module->submit_tasks(module_ch, task); 348 if (spdk_unlikely(rc != 0)) { 349 accel_update_task_stats(accel_ch, task, failed, 1); 350 } 351 352 return rc; 353 } 354 355 static inline uint64_t 356 accel_get_iovlen(struct iovec *iovs, uint32_t iovcnt) 357 { 358 uint64_t result = 0; 359 uint32_t i; 360 361 for (i = 0; i < iovcnt; ++i) { 362 result += iovs[i].iov_len; 363 } 364 365 return result; 366 } 367 368 #define ACCEL_TASK_ALLOC_AUX_BUF(task) \ 369 do { \ 370 (task)->aux = SLIST_FIRST(&(task)->accel_ch->task_aux_data_pool); \ 371 if (spdk_unlikely(!(task)->aux)) { \ 372 SPDK_ERRLOG("Fatal problem, aux data was not allocated\n"); \ 373 _put_task(task->accel_ch, task); \ 374 assert(0); \ 375 return -ENOMEM; \ 376 } \ 377 SLIST_REMOVE_HEAD(&(task)->accel_ch->task_aux_data_pool, link); \ 378 (task)->has_aux = true; \ 379 } while (0) 380 381 /* Accel framework public API for copy function */ 382 int 383 spdk_accel_submit_copy(struct spdk_io_channel *ch, void *dst, void *src, 384 uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg) 385 { 386 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 387 struct spdk_accel_task *accel_task; 388 389 accel_task = _get_task(accel_ch, cb_fn, cb_arg); 390 if (spdk_unlikely(accel_task == NULL)) { 391 return -ENOMEM; 392 } 393 394 ACCEL_TASK_ALLOC_AUX_BUF(accel_task); 395 396 accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC]; 397 accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST]; 398 accel_task->d.iovs[0].iov_base = dst; 399 accel_task->d.iovs[0].iov_len = nbytes; 400 accel_task->d.iovcnt = 1; 401 accel_task->s.iovs[0].iov_base = src; 402 accel_task->s.iovs[0].iov_len = nbytes; 403 accel_task->s.iovcnt = 1; 404 accel_task->nbytes = nbytes; 405 accel_task->op_code = SPDK_ACCEL_OPC_COPY; 406 accel_task->src_domain = NULL; 407 accel_task->dst_domain = NULL; 408 409 return accel_submit_task(accel_ch, accel_task); 410 } 411 412 /* Accel framework public API for dual cast copy function */ 413 int 414 spdk_accel_submit_dualcast(struct spdk_io_channel *ch, void *dst1, 415 void *dst2, void *src, uint64_t nbytes, 416 spdk_accel_completion_cb cb_fn, void *cb_arg) 417 { 418 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 419 struct spdk_accel_task *accel_task; 420 421 if ((uintptr_t)dst1 & (ALIGN_4K - 1) || (uintptr_t)dst2 & (ALIGN_4K - 1)) { 422 SPDK_ERRLOG("Dualcast requires 4K alignment on dst addresses\n"); 423 return -EINVAL; 424 } 425 426 accel_task = _get_task(accel_ch, cb_fn, cb_arg); 427 if (spdk_unlikely(accel_task == NULL)) { 428 return -ENOMEM; 429 } 430 431 ACCEL_TASK_ALLOC_AUX_BUF(accel_task); 432 433 accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC]; 434 accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST]; 435 accel_task->d2.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST2]; 436 accel_task->d.iovs[0].iov_base = dst1; 437 accel_task->d.iovs[0].iov_len = nbytes; 438 accel_task->d.iovcnt = 1; 439 accel_task->d2.iovs[0].iov_base = dst2; 440 accel_task->d2.iovs[0].iov_len = nbytes; 441 accel_task->d2.iovcnt = 1; 442 accel_task->s.iovs[0].iov_base = src; 443 accel_task->s.iovs[0].iov_len = nbytes; 444 accel_task->s.iovcnt = 1; 445 accel_task->nbytes = nbytes; 446 accel_task->op_code = SPDK_ACCEL_OPC_DUALCAST; 447 accel_task->src_domain = NULL; 448 accel_task->dst_domain = NULL; 449 450 return accel_submit_task(accel_ch, accel_task); 451 } 452 453 /* Accel framework public API for compare function */ 454 455 int 456 spdk_accel_submit_compare(struct spdk_io_channel *ch, void *src1, 457 void *src2, uint64_t nbytes, spdk_accel_completion_cb cb_fn, 458 void *cb_arg) 459 { 460 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 461 struct spdk_accel_task *accel_task; 462 463 accel_task = _get_task(accel_ch, cb_fn, cb_arg); 464 if (spdk_unlikely(accel_task == NULL)) { 465 return -ENOMEM; 466 } 467 468 ACCEL_TASK_ALLOC_AUX_BUF(accel_task); 469 470 accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC]; 471 accel_task->s2.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC2]; 472 accel_task->s.iovs[0].iov_base = src1; 473 accel_task->s.iovs[0].iov_len = nbytes; 474 accel_task->s.iovcnt = 1; 475 accel_task->s2.iovs[0].iov_base = src2; 476 accel_task->s2.iovs[0].iov_len = nbytes; 477 accel_task->s2.iovcnt = 1; 478 accel_task->nbytes = nbytes; 479 accel_task->op_code = SPDK_ACCEL_OPC_COMPARE; 480 accel_task->src_domain = NULL; 481 accel_task->dst_domain = NULL; 482 483 return accel_submit_task(accel_ch, accel_task); 484 } 485 486 /* Accel framework public API for fill function */ 487 int 488 spdk_accel_submit_fill(struct spdk_io_channel *ch, void *dst, 489 uint8_t fill, uint64_t nbytes, 490 spdk_accel_completion_cb cb_fn, void *cb_arg) 491 { 492 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 493 struct spdk_accel_task *accel_task; 494 495 accel_task = _get_task(accel_ch, cb_fn, cb_arg); 496 if (spdk_unlikely(accel_task == NULL)) { 497 return -ENOMEM; 498 } 499 500 ACCEL_TASK_ALLOC_AUX_BUF(accel_task); 501 502 accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST]; 503 accel_task->d.iovs[0].iov_base = dst; 504 accel_task->d.iovs[0].iov_len = nbytes; 505 accel_task->d.iovcnt = 1; 506 accel_task->nbytes = nbytes; 507 memset(&accel_task->fill_pattern, fill, sizeof(uint64_t)); 508 accel_task->op_code = SPDK_ACCEL_OPC_FILL; 509 accel_task->src_domain = NULL; 510 accel_task->dst_domain = NULL; 511 512 return accel_submit_task(accel_ch, accel_task); 513 } 514 515 /* Accel framework public API for CRC-32C function */ 516 int 517 spdk_accel_submit_crc32c(struct spdk_io_channel *ch, uint32_t *crc_dst, 518 void *src, uint32_t seed, uint64_t nbytes, spdk_accel_completion_cb cb_fn, 519 void *cb_arg) 520 { 521 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 522 struct spdk_accel_task *accel_task; 523 524 accel_task = _get_task(accel_ch, cb_fn, cb_arg); 525 if (spdk_unlikely(accel_task == NULL)) { 526 return -ENOMEM; 527 } 528 529 ACCEL_TASK_ALLOC_AUX_BUF(accel_task); 530 531 accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC]; 532 accel_task->s.iovs[0].iov_base = src; 533 accel_task->s.iovs[0].iov_len = nbytes; 534 accel_task->s.iovcnt = 1; 535 accel_task->nbytes = nbytes; 536 accel_task->crc_dst = crc_dst; 537 accel_task->seed = seed; 538 accel_task->op_code = SPDK_ACCEL_OPC_CRC32C; 539 accel_task->src_domain = NULL; 540 accel_task->dst_domain = NULL; 541 542 return accel_submit_task(accel_ch, accel_task); 543 } 544 545 /* Accel framework public API for chained CRC-32C function */ 546 int 547 spdk_accel_submit_crc32cv(struct spdk_io_channel *ch, uint32_t *crc_dst, 548 struct iovec *iov, uint32_t iov_cnt, uint32_t seed, 549 spdk_accel_completion_cb cb_fn, void *cb_arg) 550 { 551 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 552 struct spdk_accel_task *accel_task; 553 554 if (iov == NULL) { 555 SPDK_ERRLOG("iov should not be NULL"); 556 return -EINVAL; 557 } 558 559 if (!iov_cnt) { 560 SPDK_ERRLOG("iovcnt should not be zero value\n"); 561 return -EINVAL; 562 } 563 564 accel_task = _get_task(accel_ch, cb_fn, cb_arg); 565 if (spdk_unlikely(accel_task == NULL)) { 566 SPDK_ERRLOG("no memory\n"); 567 assert(0); 568 return -ENOMEM; 569 } 570 571 accel_task->s.iovs = iov; 572 accel_task->s.iovcnt = iov_cnt; 573 accel_task->nbytes = accel_get_iovlen(iov, iov_cnt); 574 accel_task->crc_dst = crc_dst; 575 accel_task->seed = seed; 576 accel_task->op_code = SPDK_ACCEL_OPC_CRC32C; 577 accel_task->src_domain = NULL; 578 accel_task->dst_domain = NULL; 579 580 return accel_submit_task(accel_ch, accel_task); 581 } 582 583 /* Accel framework public API for copy with CRC-32C function */ 584 int 585 spdk_accel_submit_copy_crc32c(struct spdk_io_channel *ch, void *dst, 586 void *src, uint32_t *crc_dst, uint32_t seed, uint64_t nbytes, 587 spdk_accel_completion_cb cb_fn, void *cb_arg) 588 { 589 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 590 struct spdk_accel_task *accel_task; 591 592 accel_task = _get_task(accel_ch, cb_fn, cb_arg); 593 if (spdk_unlikely(accel_task == NULL)) { 594 return -ENOMEM; 595 } 596 597 ACCEL_TASK_ALLOC_AUX_BUF(accel_task); 598 599 accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC]; 600 accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST]; 601 accel_task->d.iovs[0].iov_base = dst; 602 accel_task->d.iovs[0].iov_len = nbytes; 603 accel_task->d.iovcnt = 1; 604 accel_task->s.iovs[0].iov_base = src; 605 accel_task->s.iovs[0].iov_len = nbytes; 606 accel_task->s.iovcnt = 1; 607 accel_task->nbytes = nbytes; 608 accel_task->crc_dst = crc_dst; 609 accel_task->seed = seed; 610 accel_task->op_code = SPDK_ACCEL_OPC_COPY_CRC32C; 611 accel_task->src_domain = NULL; 612 accel_task->dst_domain = NULL; 613 614 return accel_submit_task(accel_ch, accel_task); 615 } 616 617 /* Accel framework public API for chained copy + CRC-32C function */ 618 int 619 spdk_accel_submit_copy_crc32cv(struct spdk_io_channel *ch, void *dst, 620 struct iovec *src_iovs, uint32_t iov_cnt, uint32_t *crc_dst, 621 uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg) 622 { 623 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 624 struct spdk_accel_task *accel_task; 625 uint64_t nbytes; 626 627 if (src_iovs == NULL) { 628 SPDK_ERRLOG("iov should not be NULL"); 629 return -EINVAL; 630 } 631 632 if (!iov_cnt) { 633 SPDK_ERRLOG("iovcnt should not be zero value\n"); 634 return -EINVAL; 635 } 636 637 accel_task = _get_task(accel_ch, cb_fn, cb_arg); 638 if (spdk_unlikely(accel_task == NULL)) { 639 SPDK_ERRLOG("no memory\n"); 640 assert(0); 641 return -ENOMEM; 642 } 643 644 nbytes = accel_get_iovlen(src_iovs, iov_cnt); 645 646 ACCEL_TASK_ALLOC_AUX_BUF(accel_task); 647 648 accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST]; 649 accel_task->d.iovs[0].iov_base = dst; 650 accel_task->d.iovs[0].iov_len = nbytes; 651 accel_task->d.iovcnt = 1; 652 accel_task->s.iovs = src_iovs; 653 accel_task->s.iovcnt = iov_cnt; 654 accel_task->nbytes = nbytes; 655 accel_task->crc_dst = crc_dst; 656 accel_task->seed = seed; 657 accel_task->op_code = SPDK_ACCEL_OPC_COPY_CRC32C; 658 accel_task->src_domain = NULL; 659 accel_task->dst_domain = NULL; 660 661 return accel_submit_task(accel_ch, accel_task); 662 } 663 664 int 665 spdk_accel_submit_compress(struct spdk_io_channel *ch, void *dst, uint64_t nbytes, 666 struct iovec *src_iovs, size_t src_iovcnt, uint32_t *output_size, 667 spdk_accel_completion_cb cb_fn, void *cb_arg) 668 { 669 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 670 struct spdk_accel_task *accel_task; 671 672 accel_task = _get_task(accel_ch, cb_fn, cb_arg); 673 if (spdk_unlikely(accel_task == NULL)) { 674 return -ENOMEM; 675 } 676 677 ACCEL_TASK_ALLOC_AUX_BUF(accel_task); 678 679 accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST]; 680 accel_task->d.iovs[0].iov_base = dst; 681 accel_task->d.iovs[0].iov_len = nbytes; 682 accel_task->d.iovcnt = 1; 683 accel_task->output_size = output_size; 684 accel_task->s.iovs = src_iovs; 685 accel_task->s.iovcnt = src_iovcnt; 686 accel_task->nbytes = nbytes; 687 accel_task->op_code = SPDK_ACCEL_OPC_COMPRESS; 688 accel_task->src_domain = NULL; 689 accel_task->dst_domain = NULL; 690 691 return accel_submit_task(accel_ch, accel_task); 692 } 693 694 int 695 spdk_accel_submit_decompress(struct spdk_io_channel *ch, struct iovec *dst_iovs, 696 size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt, 697 uint32_t *output_size, spdk_accel_completion_cb cb_fn, 698 void *cb_arg) 699 { 700 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 701 struct spdk_accel_task *accel_task; 702 703 accel_task = _get_task(accel_ch, cb_fn, cb_arg); 704 if (spdk_unlikely(accel_task == NULL)) { 705 return -ENOMEM; 706 } 707 708 accel_task->output_size = output_size; 709 accel_task->s.iovs = src_iovs; 710 accel_task->s.iovcnt = src_iovcnt; 711 accel_task->d.iovs = dst_iovs; 712 accel_task->d.iovcnt = dst_iovcnt; 713 accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt); 714 accel_task->op_code = SPDK_ACCEL_OPC_DECOMPRESS; 715 accel_task->src_domain = NULL; 716 accel_task->dst_domain = NULL; 717 718 return accel_submit_task(accel_ch, accel_task); 719 } 720 721 int 722 spdk_accel_submit_encrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key, 723 struct iovec *dst_iovs, uint32_t dst_iovcnt, 724 struct iovec *src_iovs, uint32_t src_iovcnt, 725 uint64_t iv, uint32_t block_size, 726 spdk_accel_completion_cb cb_fn, void *cb_arg) 727 { 728 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 729 struct spdk_accel_task *accel_task; 730 731 if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) { 732 return -EINVAL; 733 } 734 735 accel_task = _get_task(accel_ch, cb_fn, cb_arg); 736 if (spdk_unlikely(accel_task == NULL)) { 737 return -ENOMEM; 738 } 739 740 accel_task->crypto_key = key; 741 accel_task->s.iovs = src_iovs; 742 accel_task->s.iovcnt = src_iovcnt; 743 accel_task->d.iovs = dst_iovs; 744 accel_task->d.iovcnt = dst_iovcnt; 745 accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt); 746 accel_task->iv = iv; 747 accel_task->block_size = block_size; 748 accel_task->op_code = SPDK_ACCEL_OPC_ENCRYPT; 749 accel_task->src_domain = NULL; 750 accel_task->dst_domain = NULL; 751 752 return accel_submit_task(accel_ch, accel_task); 753 } 754 755 int 756 spdk_accel_submit_decrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key, 757 struct iovec *dst_iovs, uint32_t dst_iovcnt, 758 struct iovec *src_iovs, uint32_t src_iovcnt, 759 uint64_t iv, uint32_t block_size, 760 spdk_accel_completion_cb cb_fn, void *cb_arg) 761 { 762 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 763 struct spdk_accel_task *accel_task; 764 765 if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) { 766 return -EINVAL; 767 } 768 769 accel_task = _get_task(accel_ch, cb_fn, cb_arg); 770 if (spdk_unlikely(accel_task == NULL)) { 771 return -ENOMEM; 772 } 773 774 accel_task->crypto_key = key; 775 accel_task->s.iovs = src_iovs; 776 accel_task->s.iovcnt = src_iovcnt; 777 accel_task->d.iovs = dst_iovs; 778 accel_task->d.iovcnt = dst_iovcnt; 779 accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt); 780 accel_task->iv = iv; 781 accel_task->block_size = block_size; 782 accel_task->op_code = SPDK_ACCEL_OPC_DECRYPT; 783 accel_task->src_domain = NULL; 784 accel_task->dst_domain = NULL; 785 786 return accel_submit_task(accel_ch, accel_task); 787 } 788 789 int 790 spdk_accel_submit_xor(struct spdk_io_channel *ch, void *dst, void **sources, uint32_t nsrcs, 791 uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg) 792 { 793 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 794 struct spdk_accel_task *accel_task; 795 796 accel_task = _get_task(accel_ch, cb_fn, cb_arg); 797 if (spdk_unlikely(accel_task == NULL)) { 798 return -ENOMEM; 799 } 800 801 ACCEL_TASK_ALLOC_AUX_BUF(accel_task); 802 803 accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST]; 804 accel_task->nsrcs.srcs = sources; 805 accel_task->nsrcs.cnt = nsrcs; 806 accel_task->d.iovs[0].iov_base = dst; 807 accel_task->d.iovs[0].iov_len = nbytes; 808 accel_task->d.iovcnt = 1; 809 accel_task->nbytes = nbytes; 810 accel_task->op_code = SPDK_ACCEL_OPC_XOR; 811 accel_task->src_domain = NULL; 812 accel_task->dst_domain = NULL; 813 814 return accel_submit_task(accel_ch, accel_task); 815 } 816 817 int 818 spdk_accel_submit_dif_verify(struct spdk_io_channel *ch, 819 struct iovec *iovs, size_t iovcnt, uint32_t num_blocks, 820 const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err, 821 spdk_accel_completion_cb cb_fn, void *cb_arg) 822 { 823 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 824 struct spdk_accel_task *accel_task; 825 826 accel_task = _get_task(accel_ch, cb_fn, cb_arg); 827 if (accel_task == NULL) { 828 return -ENOMEM; 829 } 830 831 accel_task->s.iovs = iovs; 832 accel_task->s.iovcnt = iovcnt; 833 accel_task->dif.ctx = ctx; 834 accel_task->dif.err = err; 835 accel_task->dif.num_blocks = num_blocks; 836 accel_task->nbytes = num_blocks * ctx->block_size; 837 accel_task->op_code = SPDK_ACCEL_OPC_DIF_VERIFY; 838 accel_task->src_domain = NULL; 839 accel_task->dst_domain = NULL; 840 841 return accel_submit_task(accel_ch, accel_task); 842 } 843 844 int 845 spdk_accel_submit_dif_generate(struct spdk_io_channel *ch, 846 struct iovec *iovs, size_t iovcnt, uint32_t num_blocks, 847 const struct spdk_dif_ctx *ctx, 848 spdk_accel_completion_cb cb_fn, void *cb_arg) 849 { 850 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 851 struct spdk_accel_task *accel_task; 852 853 accel_task = _get_task(accel_ch, cb_fn, cb_arg); 854 if (accel_task == NULL) { 855 return -ENOMEM; 856 } 857 858 accel_task->s.iovs = iovs; 859 accel_task->s.iovcnt = iovcnt; 860 accel_task->dif.ctx = ctx; 861 accel_task->dif.num_blocks = num_blocks; 862 accel_task->nbytes = num_blocks * ctx->block_size; 863 accel_task->op_code = SPDK_ACCEL_OPC_DIF_GENERATE; 864 accel_task->src_domain = NULL; 865 accel_task->dst_domain = NULL; 866 867 return accel_submit_task(accel_ch, accel_task); 868 } 869 870 int 871 spdk_accel_submit_dif_generate_copy(struct spdk_io_channel *ch, struct iovec *dst_iovs, 872 size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt, 873 uint32_t num_blocks, const struct spdk_dif_ctx *ctx, 874 spdk_accel_completion_cb cb_fn, void *cb_arg) 875 { 876 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 877 struct spdk_accel_task *accel_task; 878 879 accel_task = _get_task(accel_ch, cb_fn, cb_arg); 880 if (accel_task == NULL) { 881 return -ENOMEM; 882 } 883 884 accel_task->s.iovs = src_iovs; 885 accel_task->s.iovcnt = src_iovcnt; 886 accel_task->d.iovs = dst_iovs; 887 accel_task->d.iovcnt = dst_iovcnt; 888 accel_task->dif.ctx = ctx; 889 accel_task->dif.num_blocks = num_blocks; 890 accel_task->nbytes = num_blocks * ctx->block_size; 891 accel_task->op_code = SPDK_ACCEL_OPC_DIF_GENERATE_COPY; 892 accel_task->src_domain = NULL; 893 accel_task->dst_domain = NULL; 894 895 return accel_submit_task(accel_ch, accel_task); 896 } 897 898 int 899 spdk_accel_submit_dif_verify_copy(struct spdk_io_channel *ch, 900 struct iovec *dst_iovs, size_t dst_iovcnt, 901 struct iovec *src_iovs, size_t src_iovcnt, uint32_t num_blocks, 902 const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err, 903 spdk_accel_completion_cb cb_fn, void *cb_arg) 904 { 905 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 906 struct spdk_accel_task *accel_task; 907 908 accel_task = _get_task(accel_ch, cb_fn, cb_arg); 909 if (accel_task == NULL) { 910 return -ENOMEM; 911 } 912 913 accel_task->s.iovs = src_iovs; 914 accel_task->s.iovcnt = src_iovcnt; 915 accel_task->d.iovs = dst_iovs; 916 accel_task->d.iovcnt = dst_iovcnt; 917 accel_task->dif.ctx = ctx; 918 accel_task->dif.err = err; 919 accel_task->dif.num_blocks = num_blocks; 920 accel_task->nbytes = num_blocks * ctx->block_size; 921 accel_task->op_code = SPDK_ACCEL_OPC_DIF_VERIFY_COPY; 922 accel_task->src_domain = NULL; 923 accel_task->dst_domain = NULL; 924 925 return accel_submit_task(accel_ch, accel_task); 926 } 927 928 static inline struct accel_buffer * 929 accel_get_buf(struct accel_io_channel *ch, uint64_t len) 930 { 931 struct accel_buffer *buf; 932 933 buf = SLIST_FIRST(&ch->buf_pool); 934 if (spdk_unlikely(buf == NULL)) { 935 accel_update_stats(ch, retry.bufdesc, 1); 936 return NULL; 937 } 938 939 SLIST_REMOVE_HEAD(&ch->buf_pool, link); 940 buf->len = len; 941 buf->buf = NULL; 942 buf->seq = NULL; 943 buf->cb_fn = NULL; 944 945 return buf; 946 } 947 948 static inline void 949 accel_put_buf(struct accel_io_channel *ch, struct accel_buffer *buf) 950 { 951 if (buf->buf != NULL) { 952 spdk_iobuf_put(&ch->iobuf, buf->buf, buf->len); 953 } 954 955 SLIST_INSERT_HEAD(&ch->buf_pool, buf, link); 956 } 957 958 static inline struct spdk_accel_sequence * 959 accel_sequence_get(struct accel_io_channel *ch) 960 { 961 struct spdk_accel_sequence *seq; 962 963 seq = SLIST_FIRST(&ch->seq_pool); 964 if (spdk_unlikely(seq == NULL)) { 965 accel_update_stats(ch, retry.sequence, 1); 966 return NULL; 967 } 968 969 accel_update_stats(ch, sequence_outstanding, 1); 970 SLIST_REMOVE_HEAD(&ch->seq_pool, link); 971 972 TAILQ_INIT(&seq->tasks); 973 SLIST_INIT(&seq->bounce_bufs); 974 975 seq->ch = ch; 976 seq->status = 0; 977 seq->state = ACCEL_SEQUENCE_STATE_INIT; 978 seq->in_process_sequence = false; 979 980 return seq; 981 } 982 983 static inline void 984 accel_sequence_put(struct spdk_accel_sequence *seq) 985 { 986 struct accel_io_channel *ch = seq->ch; 987 struct accel_buffer *buf; 988 989 while (!SLIST_EMPTY(&seq->bounce_bufs)) { 990 buf = SLIST_FIRST(&seq->bounce_bufs); 991 SLIST_REMOVE_HEAD(&seq->bounce_bufs, link); 992 accel_put_buf(seq->ch, buf); 993 } 994 995 assert(TAILQ_EMPTY(&seq->tasks)); 996 seq->ch = NULL; 997 998 SLIST_INSERT_HEAD(&ch->seq_pool, seq, link); 999 accel_update_stats(ch, sequence_outstanding, -1); 1000 } 1001 1002 static void accel_sequence_task_cb(void *cb_arg, int status); 1003 1004 static inline struct spdk_accel_task * 1005 accel_sequence_get_task(struct accel_io_channel *ch, struct spdk_accel_sequence *seq, 1006 spdk_accel_step_cb cb_fn, void *cb_arg) 1007 { 1008 struct spdk_accel_task *task; 1009 1010 task = _get_task(ch, NULL, NULL); 1011 if (spdk_unlikely(task == NULL)) { 1012 return task; 1013 } 1014 1015 task->step_cb_fn = cb_fn; 1016 task->cb_arg = cb_arg; 1017 task->seq = seq; 1018 1019 return task; 1020 } 1021 1022 int 1023 spdk_accel_append_copy(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch, 1024 struct iovec *dst_iovs, uint32_t dst_iovcnt, 1025 struct spdk_memory_domain *dst_domain, void *dst_domain_ctx, 1026 struct iovec *src_iovs, uint32_t src_iovcnt, 1027 struct spdk_memory_domain *src_domain, void *src_domain_ctx, 1028 spdk_accel_step_cb cb_fn, void *cb_arg) 1029 { 1030 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 1031 struct spdk_accel_task *task; 1032 struct spdk_accel_sequence *seq = *pseq; 1033 1034 if (seq == NULL) { 1035 seq = accel_sequence_get(accel_ch); 1036 if (spdk_unlikely(seq == NULL)) { 1037 return -ENOMEM; 1038 } 1039 } 1040 1041 assert(seq->ch == accel_ch); 1042 task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg); 1043 if (spdk_unlikely(task == NULL)) { 1044 if (*pseq == NULL) { 1045 accel_sequence_put(seq); 1046 } 1047 1048 return -ENOMEM; 1049 } 1050 1051 task->dst_domain = dst_domain; 1052 task->dst_domain_ctx = dst_domain_ctx; 1053 task->d.iovs = dst_iovs; 1054 task->d.iovcnt = dst_iovcnt; 1055 task->src_domain = src_domain; 1056 task->src_domain_ctx = src_domain_ctx; 1057 task->s.iovs = src_iovs; 1058 task->s.iovcnt = src_iovcnt; 1059 task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt); 1060 task->op_code = SPDK_ACCEL_OPC_COPY; 1061 1062 TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link); 1063 *pseq = seq; 1064 1065 return 0; 1066 } 1067 1068 int 1069 spdk_accel_append_fill(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch, 1070 void *buf, uint64_t len, 1071 struct spdk_memory_domain *domain, void *domain_ctx, uint8_t pattern, 1072 spdk_accel_step_cb cb_fn, void *cb_arg) 1073 { 1074 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 1075 struct spdk_accel_task *task; 1076 struct spdk_accel_sequence *seq = *pseq; 1077 1078 if (seq == NULL) { 1079 seq = accel_sequence_get(accel_ch); 1080 if (spdk_unlikely(seq == NULL)) { 1081 return -ENOMEM; 1082 } 1083 } 1084 1085 assert(seq->ch == accel_ch); 1086 task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg); 1087 if (spdk_unlikely(task == NULL)) { 1088 if (*pseq == NULL) { 1089 accel_sequence_put(seq); 1090 } 1091 1092 return -ENOMEM; 1093 } 1094 1095 memset(&task->fill_pattern, pattern, sizeof(uint64_t)); 1096 1097 task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool); 1098 if (spdk_unlikely(!task->aux)) { 1099 SPDK_ERRLOG("Fatal problem, aux data was not allocated\n"); 1100 if (*pseq == NULL) { 1101 accel_sequence_put((seq)); 1102 } 1103 1104 task->seq = NULL; 1105 _put_task(task->accel_ch, task); 1106 assert(0); 1107 return -ENOMEM; 1108 } 1109 SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link); 1110 task->has_aux = true; 1111 1112 task->d.iovs = &task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST]; 1113 task->d.iovs[0].iov_base = buf; 1114 task->d.iovs[0].iov_len = len; 1115 task->d.iovcnt = 1; 1116 task->nbytes = len; 1117 task->src_domain = NULL; 1118 task->dst_domain = domain; 1119 task->dst_domain_ctx = domain_ctx; 1120 task->op_code = SPDK_ACCEL_OPC_FILL; 1121 1122 TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link); 1123 *pseq = seq; 1124 1125 return 0; 1126 } 1127 1128 int 1129 spdk_accel_append_decompress(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch, 1130 struct iovec *dst_iovs, size_t dst_iovcnt, 1131 struct spdk_memory_domain *dst_domain, void *dst_domain_ctx, 1132 struct iovec *src_iovs, size_t src_iovcnt, 1133 struct spdk_memory_domain *src_domain, void *src_domain_ctx, 1134 spdk_accel_step_cb cb_fn, void *cb_arg) 1135 { 1136 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 1137 struct spdk_accel_task *task; 1138 struct spdk_accel_sequence *seq = *pseq; 1139 1140 if (seq == NULL) { 1141 seq = accel_sequence_get(accel_ch); 1142 if (spdk_unlikely(seq == NULL)) { 1143 return -ENOMEM; 1144 } 1145 } 1146 1147 assert(seq->ch == accel_ch); 1148 task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg); 1149 if (spdk_unlikely(task == NULL)) { 1150 if (*pseq == NULL) { 1151 accel_sequence_put(seq); 1152 } 1153 1154 return -ENOMEM; 1155 } 1156 1157 /* TODO: support output_size for chaining */ 1158 task->output_size = NULL; 1159 task->dst_domain = dst_domain; 1160 task->dst_domain_ctx = dst_domain_ctx; 1161 task->d.iovs = dst_iovs; 1162 task->d.iovcnt = dst_iovcnt; 1163 task->src_domain = src_domain; 1164 task->src_domain_ctx = src_domain_ctx; 1165 task->s.iovs = src_iovs; 1166 task->s.iovcnt = src_iovcnt; 1167 task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt); 1168 task->op_code = SPDK_ACCEL_OPC_DECOMPRESS; 1169 1170 TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link); 1171 *pseq = seq; 1172 1173 return 0; 1174 } 1175 1176 int 1177 spdk_accel_append_encrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch, 1178 struct spdk_accel_crypto_key *key, 1179 struct iovec *dst_iovs, uint32_t dst_iovcnt, 1180 struct spdk_memory_domain *dst_domain, void *dst_domain_ctx, 1181 struct iovec *src_iovs, uint32_t src_iovcnt, 1182 struct spdk_memory_domain *src_domain, void *src_domain_ctx, 1183 uint64_t iv, uint32_t block_size, 1184 spdk_accel_step_cb cb_fn, void *cb_arg) 1185 { 1186 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 1187 struct spdk_accel_task *task; 1188 struct spdk_accel_sequence *seq = *pseq; 1189 1190 assert(dst_iovs && dst_iovcnt && src_iovs && src_iovcnt && key && block_size); 1191 1192 if (seq == NULL) { 1193 seq = accel_sequence_get(accel_ch); 1194 if (spdk_unlikely(seq == NULL)) { 1195 return -ENOMEM; 1196 } 1197 } 1198 1199 assert(seq->ch == accel_ch); 1200 task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg); 1201 if (spdk_unlikely(task == NULL)) { 1202 if (*pseq == NULL) { 1203 accel_sequence_put(seq); 1204 } 1205 1206 return -ENOMEM; 1207 } 1208 1209 task->crypto_key = key; 1210 task->src_domain = src_domain; 1211 task->src_domain_ctx = src_domain_ctx; 1212 task->s.iovs = src_iovs; 1213 task->s.iovcnt = src_iovcnt; 1214 task->dst_domain = dst_domain; 1215 task->dst_domain_ctx = dst_domain_ctx; 1216 task->d.iovs = dst_iovs; 1217 task->d.iovcnt = dst_iovcnt; 1218 task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt); 1219 task->iv = iv; 1220 task->block_size = block_size; 1221 task->op_code = SPDK_ACCEL_OPC_ENCRYPT; 1222 1223 TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link); 1224 *pseq = seq; 1225 1226 return 0; 1227 } 1228 1229 int 1230 spdk_accel_append_decrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch, 1231 struct spdk_accel_crypto_key *key, 1232 struct iovec *dst_iovs, uint32_t dst_iovcnt, 1233 struct spdk_memory_domain *dst_domain, void *dst_domain_ctx, 1234 struct iovec *src_iovs, uint32_t src_iovcnt, 1235 struct spdk_memory_domain *src_domain, void *src_domain_ctx, 1236 uint64_t iv, uint32_t block_size, 1237 spdk_accel_step_cb cb_fn, void *cb_arg) 1238 { 1239 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 1240 struct spdk_accel_task *task; 1241 struct spdk_accel_sequence *seq = *pseq; 1242 1243 assert(dst_iovs && dst_iovcnt && src_iovs && src_iovcnt && key && block_size); 1244 1245 if (seq == NULL) { 1246 seq = accel_sequence_get(accel_ch); 1247 if (spdk_unlikely(seq == NULL)) { 1248 return -ENOMEM; 1249 } 1250 } 1251 1252 assert(seq->ch == accel_ch); 1253 task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg); 1254 if (spdk_unlikely(task == NULL)) { 1255 if (*pseq == NULL) { 1256 accel_sequence_put(seq); 1257 } 1258 1259 return -ENOMEM; 1260 } 1261 1262 task->crypto_key = key; 1263 task->src_domain = src_domain; 1264 task->src_domain_ctx = src_domain_ctx; 1265 task->s.iovs = src_iovs; 1266 task->s.iovcnt = src_iovcnt; 1267 task->dst_domain = dst_domain; 1268 task->dst_domain_ctx = dst_domain_ctx; 1269 task->d.iovs = dst_iovs; 1270 task->d.iovcnt = dst_iovcnt; 1271 task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt); 1272 task->iv = iv; 1273 task->block_size = block_size; 1274 task->op_code = SPDK_ACCEL_OPC_DECRYPT; 1275 1276 TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link); 1277 *pseq = seq; 1278 1279 return 0; 1280 } 1281 1282 int 1283 spdk_accel_append_crc32c(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch, 1284 uint32_t *dst, struct iovec *iovs, uint32_t iovcnt, 1285 struct spdk_memory_domain *domain, void *domain_ctx, 1286 uint32_t seed, spdk_accel_step_cb cb_fn, void *cb_arg) 1287 { 1288 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 1289 struct spdk_accel_task *task; 1290 struct spdk_accel_sequence *seq = *pseq; 1291 1292 if (seq == NULL) { 1293 seq = accel_sequence_get(accel_ch); 1294 if (spdk_unlikely(seq == NULL)) { 1295 return -ENOMEM; 1296 } 1297 } 1298 1299 assert(seq->ch == accel_ch); 1300 task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg); 1301 if (spdk_unlikely(task == NULL)) { 1302 if (*pseq == NULL) { 1303 accel_sequence_put(seq); 1304 } 1305 1306 return -ENOMEM; 1307 } 1308 1309 task->s.iovs = iovs; 1310 task->s.iovcnt = iovcnt; 1311 task->src_domain = domain; 1312 task->src_domain_ctx = domain_ctx; 1313 task->nbytes = accel_get_iovlen(iovs, iovcnt); 1314 task->crc_dst = dst; 1315 task->seed = seed; 1316 task->op_code = SPDK_ACCEL_OPC_CRC32C; 1317 task->dst_domain = NULL; 1318 1319 TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link); 1320 *pseq = seq; 1321 1322 return 0; 1323 } 1324 1325 int 1326 spdk_accel_get_buf(struct spdk_io_channel *ch, uint64_t len, void **buf, 1327 struct spdk_memory_domain **domain, void **domain_ctx) 1328 { 1329 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 1330 struct accel_buffer *accel_buf; 1331 1332 accel_buf = accel_get_buf(accel_ch, len); 1333 if (spdk_unlikely(accel_buf == NULL)) { 1334 return -ENOMEM; 1335 } 1336 1337 accel_buf->ch = accel_ch; 1338 1339 /* We always return the same pointer and identify the buffers through domain_ctx */ 1340 *buf = ACCEL_BUFFER_BASE; 1341 *domain_ctx = accel_buf; 1342 *domain = g_accel_domain; 1343 1344 return 0; 1345 } 1346 1347 void 1348 spdk_accel_put_buf(struct spdk_io_channel *ch, void *buf, 1349 struct spdk_memory_domain *domain, void *domain_ctx) 1350 { 1351 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 1352 struct accel_buffer *accel_buf = domain_ctx; 1353 1354 assert(domain == g_accel_domain); 1355 assert(buf == ACCEL_BUFFER_BASE); 1356 1357 accel_put_buf(accel_ch, accel_buf); 1358 } 1359 1360 static void 1361 accel_sequence_complete_task(struct spdk_accel_sequence *seq, struct spdk_accel_task *task) 1362 { 1363 struct accel_io_channel *ch = seq->ch; 1364 spdk_accel_step_cb cb_fn; 1365 void *cb_arg; 1366 1367 TAILQ_REMOVE(&seq->tasks, task, seq_link); 1368 cb_fn = task->step_cb_fn; 1369 cb_arg = task->cb_arg; 1370 task->seq = NULL; 1371 if (task->has_aux) { 1372 SLIST_INSERT_HEAD(&ch->task_aux_data_pool, task->aux, link); 1373 task->aux = NULL; 1374 task->has_aux = false; 1375 } 1376 1377 _put_task(ch, task); 1378 1379 if (cb_fn != NULL) { 1380 cb_fn(cb_arg); 1381 } 1382 } 1383 1384 static void 1385 accel_sequence_complete_tasks(struct spdk_accel_sequence *seq) 1386 { 1387 struct spdk_accel_task *task; 1388 1389 while (!TAILQ_EMPTY(&seq->tasks)) { 1390 task = TAILQ_FIRST(&seq->tasks); 1391 accel_sequence_complete_task(seq, task); 1392 } 1393 } 1394 1395 static void 1396 accel_sequence_complete(struct spdk_accel_sequence *seq) 1397 { 1398 spdk_accel_completion_cb cb_fn = seq->cb_fn; 1399 void *cb_arg = seq->cb_arg; 1400 int status = seq->status; 1401 1402 SPDK_DEBUGLOG(accel, "Completed sequence: %p with status: %d\n", seq, status); 1403 1404 accel_update_stats(seq->ch, sequence_executed, 1); 1405 if (spdk_unlikely(status != 0)) { 1406 accel_update_stats(seq->ch, sequence_failed, 1); 1407 } 1408 1409 /* First notify all users that appended operations to this sequence */ 1410 accel_sequence_complete_tasks(seq); 1411 accel_sequence_put(seq); 1412 1413 /* Then notify the user that finished the sequence */ 1414 cb_fn(cb_arg, status); 1415 } 1416 1417 static void 1418 accel_update_virt_iov(struct iovec *diov, struct iovec *siov, struct accel_buffer *accel_buf) 1419 { 1420 uintptr_t offset; 1421 1422 offset = (uintptr_t)siov->iov_base & ACCEL_BUFFER_OFFSET_MASK; 1423 assert(offset < accel_buf->len); 1424 1425 diov->iov_base = (char *)accel_buf->buf + offset; 1426 diov->iov_len = siov->iov_len; 1427 } 1428 1429 static void 1430 accel_sequence_set_virtbuf(struct spdk_accel_sequence *seq, struct accel_buffer *buf) 1431 { 1432 struct spdk_accel_task *task; 1433 struct iovec *iov; 1434 1435 /* Now that we've allocated the actual data buffer for this accel_buffer, update all tasks 1436 * in a sequence that were using it. 1437 */ 1438 TAILQ_FOREACH(task, &seq->tasks, seq_link) { 1439 if (task->src_domain == g_accel_domain && task->src_domain_ctx == buf) { 1440 if (!task->has_aux) { 1441 task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool); 1442 assert(task->aux && "Can't allocate aux data structure"); 1443 task->has_aux = true; 1444 SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link); 1445 } 1446 1447 iov = &task->aux->iovs[SPDK_ACCEL_AXU_IOV_VIRT_SRC]; 1448 assert(task->s.iovcnt == 1); 1449 accel_update_virt_iov(iov, &task->s.iovs[0], buf); 1450 task->src_domain = NULL; 1451 task->s.iovs = iov; 1452 } 1453 if (task->dst_domain == g_accel_domain && task->dst_domain_ctx == buf) { 1454 if (!task->has_aux) { 1455 task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool); 1456 assert(task->aux && "Can't allocate aux data structure"); 1457 task->has_aux = true; 1458 SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link); 1459 } 1460 1461 iov = &task->aux->iovs[SPDK_ACCEL_AXU_IOV_VIRT_DST]; 1462 assert(task->d.iovcnt == 1); 1463 accel_update_virt_iov(iov, &task->d.iovs[0], buf); 1464 task->dst_domain = NULL; 1465 task->d.iovs = iov; 1466 } 1467 } 1468 } 1469 1470 static void accel_process_sequence(struct spdk_accel_sequence *seq); 1471 1472 static void 1473 accel_iobuf_get_virtbuf_cb(struct spdk_iobuf_entry *entry, void *buf) 1474 { 1475 struct accel_buffer *accel_buf; 1476 1477 accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf); 1478 1479 assert(accel_buf->seq != NULL); 1480 assert(accel_buf->buf == NULL); 1481 accel_buf->buf = buf; 1482 1483 assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF); 1484 accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF); 1485 accel_sequence_set_virtbuf(accel_buf->seq, accel_buf); 1486 accel_process_sequence(accel_buf->seq); 1487 } 1488 1489 static bool 1490 accel_sequence_alloc_buf(struct spdk_accel_sequence *seq, struct accel_buffer *buf, 1491 spdk_iobuf_get_cb cb_fn) 1492 { 1493 struct accel_io_channel *ch = seq->ch; 1494 1495 assert(buf->seq == NULL); 1496 1497 buf->seq = seq; 1498 1499 /* Buffer might be already allocated by memory domain translation. */ 1500 if (buf->buf) { 1501 return true; 1502 } 1503 1504 buf->buf = spdk_iobuf_get(&ch->iobuf, buf->len, &buf->iobuf, cb_fn); 1505 if (spdk_unlikely(buf->buf == NULL)) { 1506 accel_update_stats(ch, retry.iobuf, 1); 1507 return false; 1508 } 1509 1510 return true; 1511 } 1512 1513 static bool 1514 accel_sequence_check_virtbuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task) 1515 { 1516 /* If a task doesn't have dst/src (e.g. fill, crc32), its dst/src domain should be set to 1517 * NULL */ 1518 if (task->src_domain == g_accel_domain) { 1519 if (!accel_sequence_alloc_buf(seq, task->src_domain_ctx, 1520 accel_iobuf_get_virtbuf_cb)) { 1521 return false; 1522 } 1523 1524 accel_sequence_set_virtbuf(seq, task->src_domain_ctx); 1525 } 1526 1527 if (task->dst_domain == g_accel_domain) { 1528 if (!accel_sequence_alloc_buf(seq, task->dst_domain_ctx, 1529 accel_iobuf_get_virtbuf_cb)) { 1530 return false; 1531 } 1532 1533 accel_sequence_set_virtbuf(seq, task->dst_domain_ctx); 1534 } 1535 1536 return true; 1537 } 1538 1539 static void 1540 accel_sequence_get_buf_cb(struct spdk_iobuf_entry *entry, void *buf) 1541 { 1542 struct accel_buffer *accel_buf; 1543 1544 accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf); 1545 1546 assert(accel_buf->seq != NULL); 1547 assert(accel_buf->buf == NULL); 1548 accel_buf->buf = buf; 1549 1550 accel_sequence_set_virtbuf(accel_buf->seq, accel_buf); 1551 accel_buf->cb_fn(accel_buf->seq, accel_buf->cb_ctx); 1552 } 1553 1554 bool 1555 spdk_accel_alloc_sequence_buf(struct spdk_accel_sequence *seq, void *buf, 1556 struct spdk_memory_domain *domain, void *domain_ctx, 1557 spdk_accel_sequence_get_buf_cb cb_fn, void *cb_ctx) 1558 { 1559 struct accel_buffer *accel_buf = domain_ctx; 1560 1561 assert(domain == g_accel_domain); 1562 accel_buf->cb_fn = cb_fn; 1563 accel_buf->cb_ctx = cb_ctx; 1564 1565 if (!accel_sequence_alloc_buf(seq, accel_buf, accel_sequence_get_buf_cb)) { 1566 return false; 1567 } 1568 1569 accel_sequence_set_virtbuf(seq, accel_buf); 1570 1571 return true; 1572 } 1573 1574 struct spdk_accel_task * 1575 spdk_accel_sequence_first_task(struct spdk_accel_sequence *seq) 1576 { 1577 return TAILQ_FIRST(&seq->tasks); 1578 } 1579 1580 struct spdk_accel_task * 1581 spdk_accel_sequence_next_task(struct spdk_accel_task *task) 1582 { 1583 return TAILQ_NEXT(task, seq_link); 1584 } 1585 1586 static inline void 1587 accel_set_bounce_buffer(struct spdk_accel_bounce_buffer *bounce, struct iovec **iovs, 1588 uint32_t *iovcnt, struct spdk_memory_domain **domain, void **domain_ctx, 1589 struct accel_buffer *buf) 1590 { 1591 bounce->orig_iovs = *iovs; 1592 bounce->orig_iovcnt = *iovcnt; 1593 bounce->orig_domain = *domain; 1594 bounce->orig_domain_ctx = *domain_ctx; 1595 bounce->iov.iov_base = buf->buf; 1596 bounce->iov.iov_len = buf->len; 1597 1598 *iovs = &bounce->iov; 1599 *iovcnt = 1; 1600 *domain = NULL; 1601 } 1602 1603 static void 1604 accel_iobuf_get_src_bounce_cb(struct spdk_iobuf_entry *entry, void *buf) 1605 { 1606 struct spdk_accel_task *task; 1607 struct accel_buffer *accel_buf; 1608 1609 accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf); 1610 assert(accel_buf->buf == NULL); 1611 accel_buf->buf = buf; 1612 1613 task = TAILQ_FIRST(&accel_buf->seq->tasks); 1614 assert(task != NULL); 1615 1616 assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF); 1617 accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF); 1618 assert(task->aux); 1619 assert(task->has_aux); 1620 accel_set_bounce_buffer(&task->aux->bounce.s, &task->s.iovs, &task->s.iovcnt, &task->src_domain, 1621 &task->src_domain_ctx, accel_buf); 1622 accel_process_sequence(accel_buf->seq); 1623 } 1624 1625 static void 1626 accel_iobuf_get_dst_bounce_cb(struct spdk_iobuf_entry *entry, void *buf) 1627 { 1628 struct spdk_accel_task *task; 1629 struct accel_buffer *accel_buf; 1630 1631 accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf); 1632 assert(accel_buf->buf == NULL); 1633 accel_buf->buf = buf; 1634 1635 task = TAILQ_FIRST(&accel_buf->seq->tasks); 1636 assert(task != NULL); 1637 1638 assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF); 1639 accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF); 1640 assert(task->aux); 1641 assert(task->has_aux); 1642 accel_set_bounce_buffer(&task->aux->bounce.d, &task->d.iovs, &task->d.iovcnt, &task->dst_domain, 1643 &task->dst_domain_ctx, accel_buf); 1644 accel_process_sequence(accel_buf->seq); 1645 } 1646 1647 static int 1648 accel_sequence_check_bouncebuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task) 1649 { 1650 struct accel_buffer *buf; 1651 1652 if (task->src_domain != NULL) { 1653 /* By the time we're here, accel buffers should have been allocated */ 1654 assert(task->src_domain != g_accel_domain); 1655 1656 if (!task->has_aux) { 1657 task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool); 1658 if (spdk_unlikely(!task->aux)) { 1659 SPDK_ERRLOG("Can't allocate aux data structure\n"); 1660 assert(0); 1661 return -EAGAIN; 1662 } 1663 task->has_aux = true; 1664 SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link); 1665 } 1666 buf = accel_get_buf(seq->ch, accel_get_iovlen(task->s.iovs, task->s.iovcnt)); 1667 if (buf == NULL) { 1668 SPDK_ERRLOG("Couldn't allocate buffer descriptor\n"); 1669 return -ENOMEM; 1670 } 1671 1672 SLIST_INSERT_HEAD(&seq->bounce_bufs, buf, link); 1673 if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_src_bounce_cb)) { 1674 return -EAGAIN; 1675 } 1676 1677 accel_set_bounce_buffer(&task->aux->bounce.s, &task->s.iovs, &task->s.iovcnt, 1678 &task->src_domain, &task->src_domain_ctx, buf); 1679 } 1680 1681 if (task->dst_domain != NULL) { 1682 /* By the time we're here, accel buffers should have been allocated */ 1683 assert(task->dst_domain != g_accel_domain); 1684 1685 if (!task->has_aux) { 1686 task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool); 1687 if (spdk_unlikely(!task->aux)) { 1688 SPDK_ERRLOG("Can't allocate aux data structure\n"); 1689 assert(0); 1690 return -EAGAIN; 1691 } 1692 task->has_aux = true; 1693 SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link); 1694 } 1695 buf = accel_get_buf(seq->ch, accel_get_iovlen(task->d.iovs, task->d.iovcnt)); 1696 if (buf == NULL) { 1697 /* The src buffer will be released when a sequence is completed */ 1698 SPDK_ERRLOG("Couldn't allocate buffer descriptor\n"); 1699 return -ENOMEM; 1700 } 1701 1702 SLIST_INSERT_HEAD(&seq->bounce_bufs, buf, link); 1703 if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_dst_bounce_cb)) { 1704 return -EAGAIN; 1705 } 1706 1707 accel_set_bounce_buffer(&task->aux->bounce.d, &task->d.iovs, &task->d.iovcnt, 1708 &task->dst_domain, &task->dst_domain_ctx, buf); 1709 } 1710 1711 return 0; 1712 } 1713 1714 static void 1715 accel_task_pull_data_cb(void *ctx, int status) 1716 { 1717 struct spdk_accel_sequence *seq = ctx; 1718 1719 assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA); 1720 if (spdk_likely(status == 0)) { 1721 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK); 1722 } else { 1723 accel_sequence_set_fail(seq, status); 1724 } 1725 1726 accel_process_sequence(seq); 1727 } 1728 1729 static void 1730 accel_task_pull_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task) 1731 { 1732 int rc; 1733 1734 assert(task->has_aux); 1735 assert(task->aux); 1736 assert(task->aux->bounce.s.orig_iovs != NULL); 1737 assert(task->aux->bounce.s.orig_domain != NULL); 1738 assert(task->aux->bounce.s.orig_domain != g_accel_domain); 1739 assert(!g_modules_opc[task->op_code].supports_memory_domains); 1740 1741 rc = spdk_memory_domain_pull_data(task->aux->bounce.s.orig_domain, 1742 task->aux->bounce.s.orig_domain_ctx, 1743 task->aux->bounce.s.orig_iovs, task->aux->bounce.s.orig_iovcnt, 1744 task->s.iovs, task->s.iovcnt, 1745 accel_task_pull_data_cb, seq); 1746 if (spdk_unlikely(rc != 0)) { 1747 SPDK_ERRLOG("Failed to pull data from memory domain: %s, rc: %d\n", 1748 spdk_memory_domain_get_dma_device_id(task->aux->bounce.s.orig_domain), rc); 1749 accel_sequence_set_fail(seq, rc); 1750 } 1751 } 1752 1753 static void 1754 accel_task_push_data_cb(void *ctx, int status) 1755 { 1756 struct spdk_accel_sequence *seq = ctx; 1757 1758 assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA); 1759 if (spdk_likely(status == 0)) { 1760 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK); 1761 } else { 1762 accel_sequence_set_fail(seq, status); 1763 } 1764 1765 accel_process_sequence(seq); 1766 } 1767 1768 static void 1769 accel_task_push_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task) 1770 { 1771 int rc; 1772 1773 assert(task->has_aux); 1774 assert(task->aux); 1775 assert(task->aux->bounce.d.orig_iovs != NULL); 1776 assert(task->aux->bounce.d.orig_domain != NULL); 1777 assert(task->aux->bounce.d.orig_domain != g_accel_domain); 1778 assert(!g_modules_opc[task->op_code].supports_memory_domains); 1779 1780 rc = spdk_memory_domain_push_data(task->aux->bounce.d.orig_domain, 1781 task->aux->bounce.d.orig_domain_ctx, 1782 task->aux->bounce.d.orig_iovs, task->aux->bounce.d.orig_iovcnt, 1783 task->d.iovs, task->d.iovcnt, 1784 accel_task_push_data_cb, seq); 1785 if (spdk_unlikely(rc != 0)) { 1786 SPDK_ERRLOG("Failed to push data to memory domain: %s, rc: %d\n", 1787 spdk_memory_domain_get_dma_device_id(task->aux->bounce.s.orig_domain), rc); 1788 accel_sequence_set_fail(seq, rc); 1789 } 1790 } 1791 1792 static void 1793 accel_process_sequence(struct spdk_accel_sequence *seq) 1794 { 1795 struct accel_io_channel *accel_ch = seq->ch; 1796 struct spdk_accel_task *task; 1797 enum accel_sequence_state state; 1798 int rc; 1799 1800 /* Prevent recursive calls to this function */ 1801 if (spdk_unlikely(seq->in_process_sequence)) { 1802 return; 1803 } 1804 seq->in_process_sequence = true; 1805 1806 task = TAILQ_FIRST(&seq->tasks); 1807 do { 1808 state = seq->state; 1809 switch (state) { 1810 case ACCEL_SEQUENCE_STATE_INIT: 1811 if (g_accel_driver != NULL) { 1812 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS); 1813 break; 1814 } 1815 /* Fall through */ 1816 case ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF: 1817 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF); 1818 if (!accel_sequence_check_virtbuf(seq, task)) { 1819 /* We couldn't allocate a buffer, wait until one is available */ 1820 break; 1821 } 1822 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF); 1823 /* Fall through */ 1824 case ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF: 1825 /* If a module supports memory domains, we don't need to allocate bounce 1826 * buffers */ 1827 if (g_modules_opc[task->op_code].supports_memory_domains) { 1828 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK); 1829 break; 1830 } 1831 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF); 1832 rc = accel_sequence_check_bouncebuf(seq, task); 1833 if (spdk_unlikely(rc != 0)) { 1834 /* We couldn't allocate a buffer, wait until one is available */ 1835 if (rc == -EAGAIN) { 1836 break; 1837 } 1838 accel_sequence_set_fail(seq, rc); 1839 break; 1840 } 1841 if (task->has_aux && task->s.iovs == &task->aux->bounce.s.iov) { 1842 assert(task->aux->bounce.s.orig_iovs); 1843 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PULL_DATA); 1844 break; 1845 } 1846 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK); 1847 /* Fall through */ 1848 case ACCEL_SEQUENCE_STATE_EXEC_TASK: 1849 SPDK_DEBUGLOG(accel, "Executing %s operation, sequence: %p\n", 1850 g_opcode_strings[task->op_code], seq); 1851 1852 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_TASK); 1853 rc = accel_submit_task(accel_ch, task); 1854 if (spdk_unlikely(rc != 0)) { 1855 SPDK_ERRLOG("Failed to submit %s operation, sequence: %p\n", 1856 g_opcode_strings[task->op_code], seq); 1857 accel_sequence_set_fail(seq, rc); 1858 } 1859 break; 1860 case ACCEL_SEQUENCE_STATE_PULL_DATA: 1861 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA); 1862 accel_task_pull_data(seq, task); 1863 break; 1864 case ACCEL_SEQUENCE_STATE_COMPLETE_TASK: 1865 if (task->has_aux && task->d.iovs == &task->aux->bounce.d.iov) { 1866 assert(task->aux->bounce.d.orig_iovs); 1867 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PUSH_DATA); 1868 break; 1869 } 1870 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK); 1871 break; 1872 case ACCEL_SEQUENCE_STATE_PUSH_DATA: 1873 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA); 1874 accel_task_push_data(seq, task); 1875 break; 1876 case ACCEL_SEQUENCE_STATE_NEXT_TASK: 1877 accel_sequence_complete_task(seq, task); 1878 /* Check if there are any remaining tasks */ 1879 task = TAILQ_FIRST(&seq->tasks); 1880 if (task == NULL) { 1881 /* Immediately return here to make sure we don't touch the sequence 1882 * after it's completed */ 1883 accel_sequence_complete(seq); 1884 return; 1885 } 1886 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_INIT); 1887 break; 1888 case ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS: 1889 assert(!TAILQ_EMPTY(&seq->tasks)); 1890 1891 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS); 1892 rc = g_accel_driver->execute_sequence(accel_ch->driver_channel, seq); 1893 if (spdk_unlikely(rc != 0)) { 1894 SPDK_ERRLOG("Failed to execute sequence: %p using driver: %s\n", 1895 seq, g_accel_driver->name); 1896 accel_sequence_set_fail(seq, rc); 1897 } 1898 break; 1899 case ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS: 1900 /* Get the task again, as the driver might have completed some tasks 1901 * synchronously */ 1902 task = TAILQ_FIRST(&seq->tasks); 1903 if (task == NULL) { 1904 /* Immediately return here to make sure we don't touch the sequence 1905 * after it's completed */ 1906 accel_sequence_complete(seq); 1907 return; 1908 } 1909 /* We don't want to execute the next task through the driver, so we 1910 * explicitly omit the INIT state here */ 1911 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF); 1912 break; 1913 case ACCEL_SEQUENCE_STATE_ERROR: 1914 /* Immediately return here to make sure we don't touch the sequence 1915 * after it's completed */ 1916 assert(seq->status != 0); 1917 accel_sequence_complete(seq); 1918 return; 1919 case ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF: 1920 case ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF: 1921 case ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA: 1922 case ACCEL_SEQUENCE_STATE_AWAIT_TASK: 1923 case ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA: 1924 case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS: 1925 break; 1926 default: 1927 assert(0 && "bad state"); 1928 break; 1929 } 1930 } while (seq->state != state); 1931 1932 seq->in_process_sequence = false; 1933 } 1934 1935 static void 1936 accel_sequence_task_cb(void *cb_arg, int status) 1937 { 1938 struct spdk_accel_sequence *seq = cb_arg; 1939 struct spdk_accel_task *task = TAILQ_FIRST(&seq->tasks); 1940 1941 switch (seq->state) { 1942 case ACCEL_SEQUENCE_STATE_AWAIT_TASK: 1943 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_COMPLETE_TASK); 1944 if (spdk_unlikely(status != 0)) { 1945 SPDK_ERRLOG("Failed to execute %s operation, sequence: %p\n", 1946 g_opcode_strings[task->op_code], seq); 1947 accel_sequence_set_fail(seq, status); 1948 } 1949 1950 accel_process_sequence(seq); 1951 break; 1952 case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS: 1953 assert(g_accel_driver != NULL); 1954 /* Immediately remove the task from the outstanding list to make sure the next call 1955 * to spdk_accel_sequence_first_task() doesn't return it */ 1956 accel_sequence_complete_task(seq, task); 1957 if (spdk_unlikely(status != 0)) { 1958 SPDK_ERRLOG("Failed to execute %s operation, sequence: %p through " 1959 "driver: %s\n", g_opcode_strings[task->op_code], seq, 1960 g_accel_driver->name); 1961 /* Update status without using accel_sequence_set_fail() to avoid changing 1962 * seq's state to ERROR until driver calls spdk_accel_sequence_continue() */ 1963 seq->status = status; 1964 } 1965 break; 1966 default: 1967 assert(0 && "bad state"); 1968 break; 1969 } 1970 } 1971 1972 void 1973 spdk_accel_sequence_continue(struct spdk_accel_sequence *seq) 1974 { 1975 assert(g_accel_driver != NULL); 1976 assert(seq->state == ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS); 1977 1978 if (spdk_likely(seq->status == 0)) { 1979 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS); 1980 } else { 1981 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR); 1982 } 1983 1984 accel_process_sequence(seq); 1985 } 1986 1987 static bool 1988 accel_compare_iovs(struct iovec *iova, uint32_t iovacnt, struct iovec *iovb, uint32_t iovbcnt) 1989 { 1990 /* For now, just do a dumb check that the iovecs arrays are exactly the same */ 1991 if (iovacnt != iovbcnt) { 1992 return false; 1993 } 1994 1995 return memcmp(iova, iovb, sizeof(*iova) * iovacnt) == 0; 1996 } 1997 1998 static bool 1999 accel_task_set_dstbuf(struct spdk_accel_task *task, struct spdk_accel_task *next) 2000 { 2001 struct spdk_accel_task *prev; 2002 2003 switch (task->op_code) { 2004 case SPDK_ACCEL_OPC_DECOMPRESS: 2005 case SPDK_ACCEL_OPC_FILL: 2006 case SPDK_ACCEL_OPC_ENCRYPT: 2007 case SPDK_ACCEL_OPC_DECRYPT: 2008 if (task->dst_domain != next->src_domain) { 2009 return false; 2010 } 2011 if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt, 2012 next->s.iovs, next->s.iovcnt)) { 2013 return false; 2014 } 2015 task->d.iovs = next->d.iovs; 2016 task->d.iovcnt = next->d.iovcnt; 2017 task->dst_domain = next->dst_domain; 2018 task->dst_domain_ctx = next->dst_domain_ctx; 2019 break; 2020 case SPDK_ACCEL_OPC_CRC32C: 2021 /* crc32 is special, because it doesn't have a dst buffer */ 2022 if (task->src_domain != next->src_domain) { 2023 return false; 2024 } 2025 if (!accel_compare_iovs(task->s.iovs, task->s.iovcnt, 2026 next->s.iovs, next->s.iovcnt)) { 2027 return false; 2028 } 2029 /* We can only change crc32's buffer if we can change previous task's buffer */ 2030 prev = TAILQ_PREV(task, accel_sequence_tasks, seq_link); 2031 if (prev == NULL) { 2032 return false; 2033 } 2034 if (!accel_task_set_dstbuf(prev, next)) { 2035 return false; 2036 } 2037 task->s.iovs = next->d.iovs; 2038 task->s.iovcnt = next->d.iovcnt; 2039 task->src_domain = next->dst_domain; 2040 task->src_domain_ctx = next->dst_domain_ctx; 2041 break; 2042 default: 2043 return false; 2044 } 2045 2046 return true; 2047 } 2048 2049 static void 2050 accel_sequence_merge_tasks(struct spdk_accel_sequence *seq, struct spdk_accel_task *task, 2051 struct spdk_accel_task **next_task) 2052 { 2053 struct spdk_accel_task *next = *next_task; 2054 2055 switch (task->op_code) { 2056 case SPDK_ACCEL_OPC_COPY: 2057 /* We only allow changing src of operations that actually have a src, e.g. we never 2058 * do it for fill. Theoretically, it is possible, but we'd have to be careful to 2059 * change the src of the operation after fill (which in turn could also be a fill). 2060 * So, for the sake of simplicity, skip this type of operations for now. 2061 */ 2062 if (next->op_code != SPDK_ACCEL_OPC_DECOMPRESS && 2063 next->op_code != SPDK_ACCEL_OPC_COPY && 2064 next->op_code != SPDK_ACCEL_OPC_ENCRYPT && 2065 next->op_code != SPDK_ACCEL_OPC_DECRYPT && 2066 next->op_code != SPDK_ACCEL_OPC_COPY_CRC32C) { 2067 break; 2068 } 2069 if (task->dst_domain != next->src_domain) { 2070 break; 2071 } 2072 if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt, 2073 next->s.iovs, next->s.iovcnt)) { 2074 break; 2075 } 2076 next->s.iovs = task->s.iovs; 2077 next->s.iovcnt = task->s.iovcnt; 2078 next->src_domain = task->src_domain; 2079 next->src_domain_ctx = task->src_domain_ctx; 2080 accel_sequence_complete_task(seq, task); 2081 break; 2082 case SPDK_ACCEL_OPC_DECOMPRESS: 2083 case SPDK_ACCEL_OPC_FILL: 2084 case SPDK_ACCEL_OPC_ENCRYPT: 2085 case SPDK_ACCEL_OPC_DECRYPT: 2086 case SPDK_ACCEL_OPC_CRC32C: 2087 /* We can only merge tasks when one of them is a copy */ 2088 if (next->op_code != SPDK_ACCEL_OPC_COPY) { 2089 break; 2090 } 2091 if (!accel_task_set_dstbuf(task, next)) { 2092 break; 2093 } 2094 /* We're removing next_task from the tasks queue, so we need to update its pointer, 2095 * so that the TAILQ_FOREACH_SAFE() loop below works correctly */ 2096 *next_task = TAILQ_NEXT(next, seq_link); 2097 accel_sequence_complete_task(seq, next); 2098 break; 2099 default: 2100 assert(0 && "bad opcode"); 2101 break; 2102 } 2103 } 2104 2105 void 2106 spdk_accel_sequence_finish(struct spdk_accel_sequence *seq, 2107 spdk_accel_completion_cb cb_fn, void *cb_arg) 2108 { 2109 struct spdk_accel_task *task, *next; 2110 2111 /* Try to remove any copy operations if possible */ 2112 TAILQ_FOREACH_SAFE(task, &seq->tasks, seq_link, next) { 2113 if (next == NULL) { 2114 break; 2115 } 2116 accel_sequence_merge_tasks(seq, task, &next); 2117 } 2118 2119 seq->cb_fn = cb_fn; 2120 seq->cb_arg = cb_arg; 2121 2122 accel_process_sequence(seq); 2123 } 2124 2125 void 2126 spdk_accel_sequence_reverse(struct spdk_accel_sequence *seq) 2127 { 2128 struct accel_sequence_tasks tasks = TAILQ_HEAD_INITIALIZER(tasks); 2129 struct spdk_accel_task *task; 2130 2131 TAILQ_SWAP(&tasks, &seq->tasks, spdk_accel_task, seq_link); 2132 2133 while (!TAILQ_EMPTY(&tasks)) { 2134 task = TAILQ_FIRST(&tasks); 2135 TAILQ_REMOVE(&tasks, task, seq_link); 2136 TAILQ_INSERT_HEAD(&seq->tasks, task, seq_link); 2137 } 2138 } 2139 2140 void 2141 spdk_accel_sequence_abort(struct spdk_accel_sequence *seq) 2142 { 2143 if (seq == NULL) { 2144 return; 2145 } 2146 2147 accel_sequence_complete_tasks(seq); 2148 accel_sequence_put(seq); 2149 } 2150 2151 struct spdk_memory_domain * 2152 spdk_accel_get_memory_domain(void) 2153 { 2154 return g_accel_domain; 2155 } 2156 2157 static struct spdk_accel_module_if * 2158 _module_find_by_name(const char *name) 2159 { 2160 struct spdk_accel_module_if *accel_module = NULL; 2161 2162 TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) { 2163 if (strcmp(name, accel_module->name) == 0) { 2164 break; 2165 } 2166 } 2167 2168 return accel_module; 2169 } 2170 2171 static inline struct spdk_accel_crypto_key * 2172 _accel_crypto_key_get(const char *name) 2173 { 2174 struct spdk_accel_crypto_key *key; 2175 2176 assert(spdk_spin_held(&g_keyring_spin)); 2177 2178 TAILQ_FOREACH(key, &g_keyring, link) { 2179 if (strcmp(name, key->param.key_name) == 0) { 2180 return key; 2181 } 2182 } 2183 2184 return NULL; 2185 } 2186 2187 static void 2188 accel_crypto_key_free_mem(struct spdk_accel_crypto_key *key) 2189 { 2190 if (key->param.hex_key) { 2191 spdk_memset_s(key->param.hex_key, key->key_size * 2, 0, key->key_size * 2); 2192 free(key->param.hex_key); 2193 } 2194 if (key->param.hex_key2) { 2195 spdk_memset_s(key->param.hex_key2, key->key2_size * 2, 0, key->key2_size * 2); 2196 free(key->param.hex_key2); 2197 } 2198 free(key->param.tweak_mode); 2199 free(key->param.key_name); 2200 free(key->param.cipher); 2201 if (key->key) { 2202 spdk_memset_s(key->key, key->key_size, 0, key->key_size); 2203 free(key->key); 2204 } 2205 if (key->key2) { 2206 spdk_memset_s(key->key2, key->key2_size, 0, key->key2_size); 2207 free(key->key2); 2208 } 2209 free(key); 2210 } 2211 2212 static void 2213 accel_crypto_key_destroy_unsafe(struct spdk_accel_crypto_key *key) 2214 { 2215 assert(key->module_if); 2216 assert(key->module_if->crypto_key_deinit); 2217 2218 key->module_if->crypto_key_deinit(key); 2219 accel_crypto_key_free_mem(key); 2220 } 2221 2222 /* 2223 * This function mitigates a timing side channel which could be caused by using strcmp() 2224 * Please refer to chapter "Mitigating Information Leakage Based on Variable Timing" in 2225 * the article [1] for more details 2226 * [1] https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/secure-coding/mitigate-timing-side-channel-crypto-implementation.html 2227 */ 2228 static bool 2229 accel_aes_xts_keys_equal(const char *k1, size_t k1_len, const char *k2, size_t k2_len) 2230 { 2231 size_t i; 2232 volatile size_t x = k1_len ^ k2_len; 2233 2234 for (i = 0; ((i < k1_len) & (i < k2_len)); i++) { 2235 x |= k1[i] ^ k2[i]; 2236 } 2237 2238 return x == 0; 2239 } 2240 2241 static const char *g_tweak_modes[] = { 2242 [SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA] = "SIMPLE_LBA", 2243 [SPDK_ACCEL_CRYPTO_TWEAK_MODE_JOIN_NEG_LBA_WITH_LBA] = "JOIN_NEG_LBA_WITH_LBA", 2244 [SPDK_ACCEL_CRYPTO_TWEAK_MODE_INCR_512_FULL_LBA] = "INCR_512_FULL_LBA", 2245 [SPDK_ACCEL_CRYPTO_TWEAK_MODE_INCR_512_UPPER_LBA] = "INCR_512_UPPER_LBA", 2246 }; 2247 2248 static const char *g_ciphers[] = { 2249 [SPDK_ACCEL_CIPHER_AES_CBC] = "AES_CBC", 2250 [SPDK_ACCEL_CIPHER_AES_XTS] = "AES_XTS", 2251 }; 2252 2253 int 2254 spdk_accel_crypto_key_create(const struct spdk_accel_crypto_key_create_param *param) 2255 { 2256 struct spdk_accel_module_if *module; 2257 struct spdk_accel_crypto_key *key; 2258 size_t hex_key_size, hex_key2_size; 2259 bool found = false; 2260 size_t i; 2261 int rc; 2262 2263 if (!param || !param->hex_key || !param->cipher || !param->key_name) { 2264 return -EINVAL; 2265 } 2266 2267 if (g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module != g_modules_opc[SPDK_ACCEL_OPC_DECRYPT].module) { 2268 /* hardly ever possible, but let's check and warn the user */ 2269 SPDK_ERRLOG("Different accel modules are used for encryption and decryption\n"); 2270 } 2271 module = g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module; 2272 2273 if (!module) { 2274 SPDK_ERRLOG("No accel module found assigned for crypto operation\n"); 2275 return -ENOENT; 2276 } 2277 2278 if (!module->crypto_key_init || !module->crypto_supports_cipher) { 2279 SPDK_ERRLOG("Module %s doesn't support crypto operations\n", module->name); 2280 return -ENOTSUP; 2281 } 2282 2283 key = calloc(1, sizeof(*key)); 2284 if (!key) { 2285 return -ENOMEM; 2286 } 2287 2288 key->param.key_name = strdup(param->key_name); 2289 if (!key->param.key_name) { 2290 rc = -ENOMEM; 2291 goto error; 2292 } 2293 2294 for (i = 0; i < SPDK_COUNTOF(g_ciphers); ++i) { 2295 assert(g_ciphers[i]); 2296 2297 if (strncmp(param->cipher, g_ciphers[i], strlen(g_ciphers[i])) == 0) { 2298 key->cipher = i; 2299 found = true; 2300 break; 2301 } 2302 } 2303 2304 if (!found) { 2305 SPDK_ERRLOG("Failed to parse cipher\n"); 2306 rc = -EINVAL; 2307 goto error; 2308 } 2309 2310 key->param.cipher = strdup(param->cipher); 2311 if (!key->param.cipher) { 2312 rc = -ENOMEM; 2313 goto error; 2314 } 2315 2316 hex_key_size = strnlen(param->hex_key, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH); 2317 if (hex_key_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) { 2318 SPDK_ERRLOG("key1 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH); 2319 rc = -EINVAL; 2320 goto error; 2321 } 2322 2323 if (hex_key_size == 0) { 2324 SPDK_ERRLOG("key1 size cannot be 0\n"); 2325 rc = -EINVAL; 2326 goto error; 2327 } 2328 2329 key->param.hex_key = strdup(param->hex_key); 2330 if (!key->param.hex_key) { 2331 rc = -ENOMEM; 2332 goto error; 2333 } 2334 2335 key->key_size = hex_key_size / 2; 2336 key->key = spdk_unhexlify(key->param.hex_key); 2337 if (!key->key) { 2338 SPDK_ERRLOG("Failed to unhexlify key1\n"); 2339 rc = -EINVAL; 2340 goto error; 2341 } 2342 2343 if (param->hex_key2) { 2344 hex_key2_size = strnlen(param->hex_key2, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH); 2345 if (hex_key2_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) { 2346 SPDK_ERRLOG("key2 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH); 2347 rc = -EINVAL; 2348 goto error; 2349 } 2350 2351 if (hex_key2_size == 0) { 2352 SPDK_ERRLOG("key2 size cannot be 0\n"); 2353 rc = -EINVAL; 2354 goto error; 2355 } 2356 2357 key->param.hex_key2 = strdup(param->hex_key2); 2358 if (!key->param.hex_key2) { 2359 rc = -ENOMEM; 2360 goto error; 2361 } 2362 2363 key->key2_size = hex_key2_size / 2; 2364 key->key2 = spdk_unhexlify(key->param.hex_key2); 2365 if (!key->key2) { 2366 SPDK_ERRLOG("Failed to unhexlify key2\n"); 2367 rc = -EINVAL; 2368 goto error; 2369 } 2370 } 2371 2372 key->tweak_mode = ACCEL_CRYPTO_TWEAK_MODE_DEFAULT; 2373 if (param->tweak_mode) { 2374 found = false; 2375 2376 key->param.tweak_mode = strdup(param->tweak_mode); 2377 if (!key->param.tweak_mode) { 2378 rc = -ENOMEM; 2379 goto error; 2380 } 2381 2382 for (i = 0; i < SPDK_COUNTOF(g_tweak_modes); ++i) { 2383 assert(g_tweak_modes[i]); 2384 2385 if (strncmp(param->tweak_mode, g_tweak_modes[i], strlen(g_tweak_modes[i])) == 0) { 2386 key->tweak_mode = i; 2387 found = true; 2388 break; 2389 } 2390 } 2391 2392 if (!found) { 2393 SPDK_ERRLOG("Failed to parse tweak mode\n"); 2394 rc = -EINVAL; 2395 goto error; 2396 } 2397 } 2398 2399 if ((!module->crypto_supports_tweak_mode && key->tweak_mode != ACCEL_CRYPTO_TWEAK_MODE_DEFAULT) || 2400 (module->crypto_supports_tweak_mode && !module->crypto_supports_tweak_mode(key->tweak_mode))) { 2401 SPDK_ERRLOG("Module %s doesn't support %s tweak mode\n", module->name, 2402 g_tweak_modes[key->tweak_mode]); 2403 rc = -EINVAL; 2404 goto error; 2405 } 2406 2407 if (!module->crypto_supports_cipher(key->cipher, key->key_size)) { 2408 SPDK_ERRLOG("Module %s doesn't support %s cipher with %zu key size\n", module->name, 2409 g_ciphers[key->cipher], key->key_size); 2410 rc = -EINVAL; 2411 goto error; 2412 } 2413 2414 if (key->cipher == SPDK_ACCEL_CIPHER_AES_XTS) { 2415 if (!key->key2) { 2416 SPDK_ERRLOG("%s key2 is missing\n", g_ciphers[key->cipher]); 2417 rc = -EINVAL; 2418 goto error; 2419 } 2420 2421 if (key->key_size != key->key2_size) { 2422 SPDK_ERRLOG("%s key size %zu is not equal to key2 size %zu\n", g_ciphers[key->cipher], 2423 key->key_size, 2424 key->key2_size); 2425 rc = -EINVAL; 2426 goto error; 2427 } 2428 2429 if (accel_aes_xts_keys_equal(key->key, key->key_size, key->key2, key->key2_size)) { 2430 SPDK_ERRLOG("%s identical keys are not secure\n", g_ciphers[key->cipher]); 2431 rc = -EINVAL; 2432 goto error; 2433 } 2434 } 2435 2436 if (key->cipher == SPDK_ACCEL_CIPHER_AES_CBC) { 2437 if (key->key2_size) { 2438 SPDK_ERRLOG("%s doesn't use key2\n", g_ciphers[key->cipher]); 2439 rc = -EINVAL; 2440 goto error; 2441 } 2442 } 2443 2444 key->module_if = module; 2445 2446 spdk_spin_lock(&g_keyring_spin); 2447 if (_accel_crypto_key_get(param->key_name)) { 2448 rc = -EEXIST; 2449 } else { 2450 rc = module->crypto_key_init(key); 2451 if (rc) { 2452 SPDK_ERRLOG("Module %s failed to initialize crypto key\n", module->name); 2453 } else { 2454 TAILQ_INSERT_TAIL(&g_keyring, key, link); 2455 } 2456 } 2457 spdk_spin_unlock(&g_keyring_spin); 2458 2459 if (rc) { 2460 goto error; 2461 } 2462 2463 return 0; 2464 2465 error: 2466 accel_crypto_key_free_mem(key); 2467 return rc; 2468 } 2469 2470 int 2471 spdk_accel_crypto_key_destroy(struct spdk_accel_crypto_key *key) 2472 { 2473 if (!key || !key->module_if) { 2474 return -EINVAL; 2475 } 2476 2477 spdk_spin_lock(&g_keyring_spin); 2478 if (!_accel_crypto_key_get(key->param.key_name)) { 2479 spdk_spin_unlock(&g_keyring_spin); 2480 return -ENOENT; 2481 } 2482 TAILQ_REMOVE(&g_keyring, key, link); 2483 spdk_spin_unlock(&g_keyring_spin); 2484 2485 accel_crypto_key_destroy_unsafe(key); 2486 2487 return 0; 2488 } 2489 2490 struct spdk_accel_crypto_key * 2491 spdk_accel_crypto_key_get(const char *name) 2492 { 2493 struct spdk_accel_crypto_key *key; 2494 2495 spdk_spin_lock(&g_keyring_spin); 2496 key = _accel_crypto_key_get(name); 2497 spdk_spin_unlock(&g_keyring_spin); 2498 2499 return key; 2500 } 2501 2502 /* Helper function when accel modules register with the framework. */ 2503 void 2504 spdk_accel_module_list_add(struct spdk_accel_module_if *accel_module) 2505 { 2506 struct spdk_accel_module_if *tmp; 2507 2508 if (_module_find_by_name(accel_module->name)) { 2509 SPDK_NOTICELOG("Module %s already registered\n", accel_module->name); 2510 assert(false); 2511 return; 2512 } 2513 2514 TAILQ_FOREACH(tmp, &spdk_accel_module_list, tailq) { 2515 if (accel_module->priority < tmp->priority) { 2516 break; 2517 } 2518 } 2519 2520 if (tmp != NULL) { 2521 TAILQ_INSERT_BEFORE(tmp, accel_module, tailq); 2522 } else { 2523 TAILQ_INSERT_TAIL(&spdk_accel_module_list, accel_module, tailq); 2524 } 2525 } 2526 2527 /* Framework level channel create callback. */ 2528 static int 2529 accel_create_channel(void *io_device, void *ctx_buf) 2530 { 2531 struct accel_io_channel *accel_ch = ctx_buf; 2532 struct spdk_accel_task *accel_task; 2533 struct spdk_accel_task_aux_data *accel_task_aux; 2534 struct spdk_accel_sequence *seq; 2535 struct accel_buffer *buf; 2536 size_t task_size_aligned; 2537 uint8_t *task_mem; 2538 uint32_t i = 0, j; 2539 int rc; 2540 2541 task_size_aligned = SPDK_ALIGN_CEIL(g_max_accel_module_size, SPDK_CACHE_LINE_SIZE); 2542 accel_ch->task_pool_base = aligned_alloc(SPDK_CACHE_LINE_SIZE, 2543 g_opts.task_count * task_size_aligned); 2544 if (!accel_ch->task_pool_base) { 2545 return -ENOMEM; 2546 } 2547 memset(accel_ch->task_pool_base, 0, g_opts.task_count * task_size_aligned); 2548 2549 accel_ch->seq_pool_base = aligned_alloc(SPDK_CACHE_LINE_SIZE, 2550 g_opts.sequence_count * sizeof(struct spdk_accel_sequence)); 2551 if (accel_ch->seq_pool_base == NULL) { 2552 goto err; 2553 } 2554 memset(accel_ch->seq_pool_base, 0, g_opts.sequence_count * sizeof(struct spdk_accel_sequence)); 2555 2556 accel_ch->task_aux_data_base = calloc(g_opts.task_count, sizeof(struct spdk_accel_task_aux_data)); 2557 if (accel_ch->task_aux_data_base == NULL) { 2558 goto err; 2559 } 2560 2561 accel_ch->buf_pool_base = calloc(g_opts.buf_count, sizeof(struct accel_buffer)); 2562 if (accel_ch->buf_pool_base == NULL) { 2563 goto err; 2564 } 2565 2566 STAILQ_INIT(&accel_ch->task_pool); 2567 SLIST_INIT(&accel_ch->task_aux_data_pool); 2568 SLIST_INIT(&accel_ch->seq_pool); 2569 SLIST_INIT(&accel_ch->buf_pool); 2570 2571 task_mem = accel_ch->task_pool_base; 2572 for (i = 0; i < g_opts.task_count; i++) { 2573 accel_task = (struct spdk_accel_task *)task_mem; 2574 accel_task->aux = NULL; 2575 STAILQ_INSERT_TAIL(&accel_ch->task_pool, accel_task, link); 2576 task_mem += task_size_aligned; 2577 accel_task_aux = &accel_ch->task_aux_data_base[i]; 2578 SLIST_INSERT_HEAD(&accel_ch->task_aux_data_pool, accel_task_aux, link); 2579 } 2580 for (i = 0; i < g_opts.sequence_count; i++) { 2581 seq = &accel_ch->seq_pool_base[i]; 2582 SLIST_INSERT_HEAD(&accel_ch->seq_pool, seq, link); 2583 } 2584 for (i = 0; i < g_opts.buf_count; i++) { 2585 buf = &accel_ch->buf_pool_base[i]; 2586 SLIST_INSERT_HEAD(&accel_ch->buf_pool, buf, link); 2587 } 2588 2589 /* Assign modules and get IO channels for each */ 2590 for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) { 2591 accel_ch->module_ch[i] = g_modules_opc[i].module->get_io_channel(); 2592 /* This can happen if idxd runs out of channels. */ 2593 if (accel_ch->module_ch[i] == NULL) { 2594 SPDK_ERRLOG("Module %s failed to get io channel\n", g_modules_opc[i].module->name); 2595 goto err; 2596 } 2597 } 2598 2599 if (g_accel_driver != NULL) { 2600 accel_ch->driver_channel = g_accel_driver->get_io_channel(); 2601 if (accel_ch->driver_channel == NULL) { 2602 SPDK_ERRLOG("Failed to get driver's IO channel\n"); 2603 goto err; 2604 } 2605 } 2606 2607 rc = spdk_iobuf_channel_init(&accel_ch->iobuf, "accel", g_opts.small_cache_size, 2608 g_opts.large_cache_size); 2609 if (rc != 0) { 2610 SPDK_ERRLOG("Failed to initialize iobuf accel channel\n"); 2611 goto err; 2612 } 2613 2614 return 0; 2615 err: 2616 if (accel_ch->driver_channel != NULL) { 2617 spdk_put_io_channel(accel_ch->driver_channel); 2618 } 2619 for (j = 0; j < i; j++) { 2620 spdk_put_io_channel(accel_ch->module_ch[j]); 2621 } 2622 free(accel_ch->task_pool_base); 2623 free(accel_ch->task_aux_data_base); 2624 free(accel_ch->seq_pool_base); 2625 free(accel_ch->buf_pool_base); 2626 2627 return -ENOMEM; 2628 } 2629 2630 static void 2631 accel_add_stats(struct accel_stats *total, struct accel_stats *stats) 2632 { 2633 int i; 2634 2635 total->sequence_executed += stats->sequence_executed; 2636 total->sequence_failed += stats->sequence_failed; 2637 total->sequence_outstanding += stats->sequence_outstanding; 2638 total->retry.task += stats->retry.task; 2639 total->retry.sequence += stats->retry.sequence; 2640 total->retry.iobuf += stats->retry.iobuf; 2641 total->retry.bufdesc += stats->retry.bufdesc; 2642 for (i = 0; i < SPDK_ACCEL_OPC_LAST; ++i) { 2643 total->operations[i].executed += stats->operations[i].executed; 2644 total->operations[i].failed += stats->operations[i].failed; 2645 total->operations[i].num_bytes += stats->operations[i].num_bytes; 2646 } 2647 } 2648 2649 /* Framework level channel destroy callback. */ 2650 static void 2651 accel_destroy_channel(void *io_device, void *ctx_buf) 2652 { 2653 struct accel_io_channel *accel_ch = ctx_buf; 2654 int i; 2655 2656 spdk_iobuf_channel_fini(&accel_ch->iobuf); 2657 2658 if (accel_ch->driver_channel != NULL) { 2659 spdk_put_io_channel(accel_ch->driver_channel); 2660 } 2661 2662 for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) { 2663 assert(accel_ch->module_ch[i] != NULL); 2664 spdk_put_io_channel(accel_ch->module_ch[i]); 2665 accel_ch->module_ch[i] = NULL; 2666 } 2667 2668 /* Update global stats to make sure channel's stats aren't lost after a channel is gone */ 2669 spdk_spin_lock(&g_stats_lock); 2670 accel_add_stats(&g_stats, &accel_ch->stats); 2671 spdk_spin_unlock(&g_stats_lock); 2672 2673 free(accel_ch->task_pool_base); 2674 free(accel_ch->task_aux_data_base); 2675 free(accel_ch->seq_pool_base); 2676 free(accel_ch->buf_pool_base); 2677 } 2678 2679 struct spdk_io_channel * 2680 spdk_accel_get_io_channel(void) 2681 { 2682 return spdk_get_io_channel(&spdk_accel_module_list); 2683 } 2684 2685 static int 2686 accel_module_initialize(void) 2687 { 2688 struct spdk_accel_module_if *accel_module, *tmp_module; 2689 int rc = 0, module_rc; 2690 2691 TAILQ_FOREACH_SAFE(accel_module, &spdk_accel_module_list, tailq, tmp_module) { 2692 module_rc = accel_module->module_init(); 2693 if (module_rc) { 2694 TAILQ_REMOVE(&spdk_accel_module_list, accel_module, tailq); 2695 if (module_rc == -ENODEV) { 2696 SPDK_NOTICELOG("No devices for module %s, skipping\n", accel_module->name); 2697 } else if (!rc) { 2698 SPDK_ERRLOG("Module %s initialization failed with %d\n", accel_module->name, module_rc); 2699 rc = module_rc; 2700 } 2701 continue; 2702 } 2703 2704 SPDK_DEBUGLOG(accel, "Module %s initialized.\n", accel_module->name); 2705 } 2706 2707 return rc; 2708 } 2709 2710 static void 2711 accel_module_init_opcode(enum spdk_accel_opcode opcode) 2712 { 2713 struct accel_module *module = &g_modules_opc[opcode]; 2714 struct spdk_accel_module_if *module_if = module->module; 2715 2716 if (module_if->get_memory_domains != NULL) { 2717 module->supports_memory_domains = module_if->get_memory_domains(NULL, 0) > 0; 2718 } 2719 } 2720 2721 static int 2722 accel_memory_domain_translate(struct spdk_memory_domain *src_domain, void *src_domain_ctx, 2723 struct spdk_memory_domain *dst_domain, struct spdk_memory_domain_translation_ctx *dst_domain_ctx, 2724 void *addr, size_t len, struct spdk_memory_domain_translation_result *result) 2725 { 2726 struct accel_buffer *buf = src_domain_ctx; 2727 2728 SPDK_DEBUGLOG(accel, "translate addr %p, len %zu\n", addr, len); 2729 2730 assert(g_accel_domain == src_domain); 2731 assert(spdk_memory_domain_get_system_domain() == dst_domain); 2732 assert(buf->buf == NULL); 2733 assert(addr == ACCEL_BUFFER_BASE); 2734 assert(len == buf->len); 2735 2736 buf->buf = spdk_iobuf_get(&buf->ch->iobuf, buf->len, NULL, NULL); 2737 if (spdk_unlikely(buf->buf == NULL)) { 2738 return -ENOMEM; 2739 } 2740 2741 result->iov_count = 1; 2742 result->iov.iov_base = buf->buf; 2743 result->iov.iov_len = buf->len; 2744 SPDK_DEBUGLOG(accel, "translated addr %p\n", result->iov.iov_base); 2745 return 0; 2746 } 2747 2748 static void 2749 accel_memory_domain_invalidate(struct spdk_memory_domain *domain, void *domain_ctx, 2750 struct iovec *iov, uint32_t iovcnt) 2751 { 2752 struct accel_buffer *buf = domain_ctx; 2753 2754 SPDK_DEBUGLOG(accel, "invalidate addr %p, len %zu\n", iov[0].iov_base, iov[0].iov_len); 2755 2756 assert(g_accel_domain == domain); 2757 assert(iovcnt == 1); 2758 assert(buf->buf != NULL); 2759 assert(iov[0].iov_base == buf->buf); 2760 assert(iov[0].iov_len == buf->len); 2761 2762 spdk_iobuf_put(&buf->ch->iobuf, buf->buf, buf->len); 2763 buf->buf = NULL; 2764 } 2765 2766 int 2767 spdk_accel_initialize(void) 2768 { 2769 enum spdk_accel_opcode op; 2770 struct spdk_accel_module_if *accel_module = NULL; 2771 int rc; 2772 2773 /* 2774 * We need a unique identifier for the accel framework, so use the 2775 * spdk_accel_module_list address for this purpose. 2776 */ 2777 spdk_io_device_register(&spdk_accel_module_list, accel_create_channel, accel_destroy_channel, 2778 sizeof(struct accel_io_channel), "accel"); 2779 2780 spdk_spin_init(&g_keyring_spin); 2781 spdk_spin_init(&g_stats_lock); 2782 2783 rc = spdk_memory_domain_create(&g_accel_domain, SPDK_DMA_DEVICE_TYPE_ACCEL, NULL, 2784 "SPDK_ACCEL_DMA_DEVICE"); 2785 if (rc != 0) { 2786 SPDK_ERRLOG("Failed to create accel memory domain\n"); 2787 return rc; 2788 } 2789 2790 spdk_memory_domain_set_translation(g_accel_domain, accel_memory_domain_translate); 2791 spdk_memory_domain_set_invalidate(g_accel_domain, accel_memory_domain_invalidate); 2792 2793 g_modules_started = true; 2794 rc = accel_module_initialize(); 2795 if (rc) { 2796 return rc; 2797 } 2798 2799 if (g_accel_driver != NULL && g_accel_driver->init != NULL) { 2800 rc = g_accel_driver->init(); 2801 if (rc != 0) { 2802 SPDK_ERRLOG("Failed to initialize driver %s: %s\n", g_accel_driver->name, 2803 spdk_strerror(-rc)); 2804 return rc; 2805 } 2806 } 2807 2808 /* The module list is order by priority, with the highest priority modules being at the end 2809 * of the list. The software module should be somewhere at the beginning of the list, 2810 * before all HW modules. 2811 * NOTE: all opcodes must be supported by software in the event that no HW modules are 2812 * initialized to support the operation. 2813 */ 2814 TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) { 2815 for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) { 2816 if (accel_module->supports_opcode(op)) { 2817 g_modules_opc[op].module = accel_module; 2818 SPDK_DEBUGLOG(accel, "OPC 0x%x now assigned to %s\n", op, accel_module->name); 2819 } 2820 } 2821 2822 if (accel_module->get_ctx_size != NULL) { 2823 g_max_accel_module_size = spdk_max(g_max_accel_module_size, 2824 accel_module->get_ctx_size()); 2825 } 2826 } 2827 2828 /* Now lets check for overrides and apply all that exist */ 2829 for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) { 2830 if (g_modules_opc_override[op] != NULL) { 2831 accel_module = _module_find_by_name(g_modules_opc_override[op]); 2832 if (accel_module == NULL) { 2833 SPDK_ERRLOG("Invalid module name of %s\n", g_modules_opc_override[op]); 2834 return -EINVAL; 2835 } 2836 if (accel_module->supports_opcode(op) == false) { 2837 SPDK_ERRLOG("Module %s does not support op code %d\n", accel_module->name, op); 2838 return -EINVAL; 2839 } 2840 g_modules_opc[op].module = accel_module; 2841 } 2842 } 2843 2844 if (g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module != g_modules_opc[SPDK_ACCEL_OPC_DECRYPT].module) { 2845 SPDK_ERRLOG("Different accel modules are assigned to encrypt and decrypt operations"); 2846 return -EINVAL; 2847 } 2848 2849 for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) { 2850 assert(g_modules_opc[op].module != NULL); 2851 accel_module_init_opcode(op); 2852 } 2853 2854 rc = spdk_iobuf_register_module("accel"); 2855 if (rc != 0) { 2856 SPDK_ERRLOG("Failed to register accel iobuf module\n"); 2857 return rc; 2858 } 2859 2860 return 0; 2861 } 2862 2863 static void 2864 accel_module_finish_cb(void) 2865 { 2866 spdk_accel_fini_cb cb_fn = g_fini_cb_fn; 2867 2868 cb_fn(g_fini_cb_arg); 2869 g_fini_cb_fn = NULL; 2870 g_fini_cb_arg = NULL; 2871 } 2872 2873 static void 2874 accel_write_overridden_opc(struct spdk_json_write_ctx *w, const char *opc_str, 2875 const char *module_str) 2876 { 2877 spdk_json_write_object_begin(w); 2878 spdk_json_write_named_string(w, "method", "accel_assign_opc"); 2879 spdk_json_write_named_object_begin(w, "params"); 2880 spdk_json_write_named_string(w, "opname", opc_str); 2881 spdk_json_write_named_string(w, "module", module_str); 2882 spdk_json_write_object_end(w); 2883 spdk_json_write_object_end(w); 2884 } 2885 2886 static void 2887 __accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key) 2888 { 2889 spdk_json_write_named_string(w, "name", key->param.key_name); 2890 spdk_json_write_named_string(w, "cipher", key->param.cipher); 2891 spdk_json_write_named_string(w, "key", key->param.hex_key); 2892 if (key->param.hex_key2) { 2893 spdk_json_write_named_string(w, "key2", key->param.hex_key2); 2894 } 2895 2896 if (key->param.tweak_mode) { 2897 spdk_json_write_named_string(w, "tweak_mode", key->param.tweak_mode); 2898 } 2899 } 2900 2901 void 2902 _accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key) 2903 { 2904 spdk_json_write_object_begin(w); 2905 __accel_crypto_key_dump_param(w, key); 2906 spdk_json_write_object_end(w); 2907 } 2908 2909 static void 2910 _accel_crypto_key_write_config_json(struct spdk_json_write_ctx *w, 2911 struct spdk_accel_crypto_key *key) 2912 { 2913 spdk_json_write_object_begin(w); 2914 spdk_json_write_named_string(w, "method", "accel_crypto_key_create"); 2915 spdk_json_write_named_object_begin(w, "params"); 2916 __accel_crypto_key_dump_param(w, key); 2917 spdk_json_write_object_end(w); 2918 spdk_json_write_object_end(w); 2919 } 2920 2921 static void 2922 accel_write_options(struct spdk_json_write_ctx *w) 2923 { 2924 spdk_json_write_object_begin(w); 2925 spdk_json_write_named_string(w, "method", "accel_set_options"); 2926 spdk_json_write_named_object_begin(w, "params"); 2927 spdk_json_write_named_uint32(w, "small_cache_size", g_opts.small_cache_size); 2928 spdk_json_write_named_uint32(w, "large_cache_size", g_opts.large_cache_size); 2929 spdk_json_write_named_uint32(w, "task_count", g_opts.task_count); 2930 spdk_json_write_named_uint32(w, "sequence_count", g_opts.sequence_count); 2931 spdk_json_write_named_uint32(w, "buf_count", g_opts.buf_count); 2932 spdk_json_write_object_end(w); 2933 spdk_json_write_object_end(w); 2934 } 2935 2936 static void 2937 _accel_crypto_keys_write_config_json(struct spdk_json_write_ctx *w, bool full_dump) 2938 { 2939 struct spdk_accel_crypto_key *key; 2940 2941 spdk_spin_lock(&g_keyring_spin); 2942 TAILQ_FOREACH(key, &g_keyring, link) { 2943 if (full_dump) { 2944 _accel_crypto_key_write_config_json(w, key); 2945 } else { 2946 _accel_crypto_key_dump_param(w, key); 2947 } 2948 } 2949 spdk_spin_unlock(&g_keyring_spin); 2950 } 2951 2952 void 2953 _accel_crypto_keys_dump_param(struct spdk_json_write_ctx *w) 2954 { 2955 _accel_crypto_keys_write_config_json(w, false); 2956 } 2957 2958 void 2959 spdk_accel_write_config_json(struct spdk_json_write_ctx *w) 2960 { 2961 struct spdk_accel_module_if *accel_module; 2962 int i; 2963 2964 spdk_json_write_array_begin(w); 2965 accel_write_options(w); 2966 2967 TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) { 2968 if (accel_module->write_config_json) { 2969 accel_module->write_config_json(w); 2970 } 2971 } 2972 for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) { 2973 if (g_modules_opc_override[i]) { 2974 accel_write_overridden_opc(w, g_opcode_strings[i], g_modules_opc_override[i]); 2975 } 2976 } 2977 2978 _accel_crypto_keys_write_config_json(w, true); 2979 2980 spdk_json_write_array_end(w); 2981 } 2982 2983 void 2984 spdk_accel_module_finish(void) 2985 { 2986 if (!g_accel_module) { 2987 g_accel_module = TAILQ_FIRST(&spdk_accel_module_list); 2988 } else { 2989 g_accel_module = TAILQ_NEXT(g_accel_module, tailq); 2990 } 2991 2992 if (!g_accel_module) { 2993 if (g_accel_driver != NULL && g_accel_driver->fini != NULL) { 2994 g_accel_driver->fini(); 2995 } 2996 2997 spdk_spin_destroy(&g_keyring_spin); 2998 spdk_spin_destroy(&g_stats_lock); 2999 if (g_accel_domain) { 3000 spdk_memory_domain_destroy(g_accel_domain); 3001 g_accel_domain = NULL; 3002 } 3003 accel_module_finish_cb(); 3004 return; 3005 } 3006 3007 if (g_accel_module->module_fini) { 3008 spdk_thread_send_msg(spdk_get_thread(), g_accel_module->module_fini, NULL); 3009 } else { 3010 spdk_accel_module_finish(); 3011 } 3012 } 3013 3014 static void 3015 accel_io_device_unregister_cb(void *io_device) 3016 { 3017 struct spdk_accel_crypto_key *key, *key_tmp; 3018 enum spdk_accel_opcode op; 3019 3020 spdk_spin_lock(&g_keyring_spin); 3021 TAILQ_FOREACH_SAFE(key, &g_keyring, link, key_tmp) { 3022 accel_crypto_key_destroy_unsafe(key); 3023 } 3024 spdk_spin_unlock(&g_keyring_spin); 3025 3026 for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) { 3027 if (g_modules_opc_override[op] != NULL) { 3028 free(g_modules_opc_override[op]); 3029 g_modules_opc_override[op] = NULL; 3030 } 3031 g_modules_opc[op].module = NULL; 3032 } 3033 3034 spdk_accel_module_finish(); 3035 } 3036 3037 void 3038 spdk_accel_finish(spdk_accel_fini_cb cb_fn, void *cb_arg) 3039 { 3040 assert(cb_fn != NULL); 3041 3042 g_fini_cb_fn = cb_fn; 3043 g_fini_cb_arg = cb_arg; 3044 3045 spdk_io_device_unregister(&spdk_accel_module_list, accel_io_device_unregister_cb); 3046 } 3047 3048 static struct spdk_accel_driver * 3049 accel_find_driver(const char *name) 3050 { 3051 struct spdk_accel_driver *driver; 3052 3053 TAILQ_FOREACH(driver, &g_accel_drivers, tailq) { 3054 if (strcmp(driver->name, name) == 0) { 3055 return driver; 3056 } 3057 } 3058 3059 return NULL; 3060 } 3061 3062 int 3063 spdk_accel_set_driver(const char *name) 3064 { 3065 struct spdk_accel_driver *driver; 3066 3067 driver = accel_find_driver(name); 3068 if (driver == NULL) { 3069 SPDK_ERRLOG("Couldn't find driver named '%s'\n", name); 3070 return -ENODEV; 3071 } 3072 3073 g_accel_driver = driver; 3074 3075 return 0; 3076 } 3077 3078 const char * 3079 spdk_accel_get_driver_name(void) 3080 { 3081 if (!g_accel_driver) { 3082 return NULL; 3083 } 3084 3085 return g_accel_driver->name; 3086 } 3087 3088 void 3089 spdk_accel_driver_register(struct spdk_accel_driver *driver) 3090 { 3091 if (accel_find_driver(driver->name)) { 3092 SPDK_ERRLOG("Driver named '%s' has already been registered\n", driver->name); 3093 assert(0); 3094 return; 3095 } 3096 3097 TAILQ_INSERT_TAIL(&g_accel_drivers, driver, tailq); 3098 } 3099 3100 int 3101 spdk_accel_set_opts(const struct spdk_accel_opts *opts) 3102 { 3103 if (!opts) { 3104 SPDK_ERRLOG("opts cannot be NULL\n"); 3105 return -1; 3106 } 3107 3108 if (!opts->opts_size) { 3109 SPDK_ERRLOG("opts_size inside opts cannot be zero value\n"); 3110 return -1; 3111 } 3112 3113 #define SET_FIELD(field) \ 3114 if (offsetof(struct spdk_accel_opts, field) + sizeof(opts->field) <= opts->opts_size) { \ 3115 g_opts.field = opts->field; \ 3116 } \ 3117 3118 SET_FIELD(small_cache_size); 3119 SET_FIELD(large_cache_size); 3120 SET_FIELD(task_count); 3121 SET_FIELD(sequence_count); 3122 SET_FIELD(buf_count); 3123 3124 g_opts.opts_size = opts->opts_size; 3125 3126 #undef SET_FIELD 3127 3128 return 0; 3129 } 3130 3131 void 3132 spdk_accel_get_opts(struct spdk_accel_opts *opts, size_t opts_size) 3133 { 3134 if (!opts) { 3135 SPDK_ERRLOG("opts should not be NULL\n"); 3136 return; 3137 } 3138 3139 if (!opts_size) { 3140 SPDK_ERRLOG("opts_size should not be zero value\n"); 3141 return; 3142 } 3143 3144 opts->opts_size = opts_size; 3145 3146 #define SET_FIELD(field) \ 3147 if (offsetof(struct spdk_accel_opts, field) + sizeof(opts->field) <= opts_size) { \ 3148 opts->field = g_opts.field; \ 3149 } \ 3150 3151 SET_FIELD(small_cache_size); 3152 SET_FIELD(large_cache_size); 3153 SET_FIELD(task_count); 3154 SET_FIELD(sequence_count); 3155 SET_FIELD(buf_count); 3156 3157 #undef SET_FIELD 3158 3159 /* Do not remove this statement, you should always update this statement when you adding a new field, 3160 * and do not forget to add the SET_FIELD statement for your added field. */ 3161 SPDK_STATIC_ASSERT(sizeof(struct spdk_accel_opts) == 28, "Incorrect size"); 3162 } 3163 3164 struct accel_get_stats_ctx { 3165 struct accel_stats stats; 3166 accel_get_stats_cb cb_fn; 3167 void *cb_arg; 3168 }; 3169 3170 static void 3171 accel_get_channel_stats_done(struct spdk_io_channel_iter *iter, int status) 3172 { 3173 struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter); 3174 3175 ctx->cb_fn(&ctx->stats, ctx->cb_arg); 3176 free(ctx); 3177 } 3178 3179 static void 3180 accel_get_channel_stats(struct spdk_io_channel_iter *iter) 3181 { 3182 struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(iter); 3183 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 3184 struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter); 3185 3186 accel_add_stats(&ctx->stats, &accel_ch->stats); 3187 spdk_for_each_channel_continue(iter, 0); 3188 } 3189 3190 int 3191 accel_get_stats(accel_get_stats_cb cb_fn, void *cb_arg) 3192 { 3193 struct accel_get_stats_ctx *ctx; 3194 3195 ctx = calloc(1, sizeof(*ctx)); 3196 if (ctx == NULL) { 3197 return -ENOMEM; 3198 } 3199 3200 spdk_spin_lock(&g_stats_lock); 3201 accel_add_stats(&ctx->stats, &g_stats); 3202 spdk_spin_unlock(&g_stats_lock); 3203 3204 ctx->cb_fn = cb_fn; 3205 ctx->cb_arg = cb_arg; 3206 3207 spdk_for_each_channel(&spdk_accel_module_list, accel_get_channel_stats, ctx, 3208 accel_get_channel_stats_done); 3209 3210 return 0; 3211 } 3212 3213 void 3214 spdk_accel_get_opcode_stats(struct spdk_io_channel *ch, enum spdk_accel_opcode opcode, 3215 struct spdk_accel_opcode_stats *stats, size_t size) 3216 { 3217 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 3218 3219 #define FIELD_OK(field) \ 3220 offsetof(struct spdk_accel_opcode_stats, field) + sizeof(stats->field) <= size 3221 3222 #define SET_FIELD(field, value) \ 3223 if (FIELD_OK(field)) { \ 3224 stats->field = value; \ 3225 } 3226 3227 SET_FIELD(executed, accel_ch->stats.operations[opcode].executed); 3228 SET_FIELD(failed, accel_ch->stats.operations[opcode].failed); 3229 SET_FIELD(num_bytes, accel_ch->stats.operations[opcode].num_bytes); 3230 3231 #undef FIELD_OK 3232 #undef SET_FIELD 3233 } 3234 3235 uint8_t 3236 spdk_accel_get_buf_align(enum spdk_accel_opcode opcode, 3237 const struct spdk_accel_operation_exec_ctx *ctx) 3238 { 3239 struct spdk_accel_module_if *module = g_modules_opc[opcode].module; 3240 struct spdk_accel_opcode_info modinfo = {}, drvinfo = {}; 3241 3242 if (g_accel_driver != NULL && g_accel_driver->get_operation_info != NULL) { 3243 g_accel_driver->get_operation_info(opcode, ctx, &drvinfo); 3244 } 3245 3246 if (module->get_operation_info != NULL) { 3247 module->get_operation_info(opcode, ctx, &modinfo); 3248 } 3249 3250 /* If a driver is set, it'll execute most of the operations, while the rest will usually 3251 * fall back to accel_sw, which doesn't have any alignment requiremenets. However, to be 3252 * extra safe, return the max(driver, module) if a driver delegates some operations to a 3253 * hardware module. */ 3254 return spdk_max(modinfo.required_alignment, drvinfo.required_alignment); 3255 } 3256 3257 struct spdk_accel_module_if * 3258 spdk_accel_get_module(const char *name) 3259 { 3260 struct spdk_accel_module_if *module; 3261 3262 TAILQ_FOREACH(module, &spdk_accel_module_list, tailq) { 3263 if (strcmp(module->name, name) == 0) { 3264 return module; 3265 } 3266 } 3267 3268 return NULL; 3269 } 3270 3271 int 3272 spdk_accel_get_opc_memory_domains(enum spdk_accel_opcode opcode, 3273 struct spdk_memory_domain **domains, 3274 int array_size) 3275 { 3276 assert(opcode < SPDK_ACCEL_OPC_LAST); 3277 3278 if (g_modules_opc[opcode].module->get_memory_domains) { 3279 return g_modules_opc[opcode].module->get_memory_domains(domains, array_size); 3280 } 3281 3282 return 0; 3283 } 3284 3285 SPDK_LOG_REGISTER_COMPONENT(accel) 3286