/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Intel Corporation.
 * Copyright (c) 2022, 2023 NVIDIA CORPORATION & AFFILIATES.
 * All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/accel_module.h"

#include "accel_internal.h"

#include "spdk/dma.h"
#include "spdk/env.h"
#include "spdk/likely.h"
#include "spdk/log.h"
#include "spdk/thread.h"
#include "spdk/json.h"
#include "spdk/crc32.h"
#include "spdk/util.h"
#include "spdk/hexlify.h"
#include "spdk/string.h"

/* Accelerator Framework: The following provides a top level
 * generic API for the accelerator functions defined here. Modules,
 * such as the one in /module/accel/ioat, supply the implementation
 * with the exception of the pure software implementation contained
 * later in this file.
 */
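
/*
 * Illustrative sketch (not part of this file): the typical calling pattern for
 * the public submit API defined below. spdk_accel_get_io_channel() comes from
 * the public accel headers; copy_done/do_copy are hypothetical names, and the
 * callback signature matches spdk_accel_completion_cb as used throughout this
 * file.
 *
 *	static void
 *	copy_done(void *cb_arg, int status)
 *	{
 *		// status is 0 on success, negative errno otherwise
 *	}
 *
 *	static void
 *	do_copy(void *dst, void *src, uint64_t len)
 *	{
 *		struct spdk_io_channel *ch = spdk_accel_get_io_channel();
 *		int rc;
 *
 *		rc = spdk_accel_submit_copy(ch, dst, src, len, copy_done, NULL);
 *		if (rc == -ENOMEM) {
 *			// the channel's task pool is exhausted, retry later
 *		}
 *	}
 */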
#define ALIGN_4K			0x1000
#define MAX_TASKS_PER_CHANNEL		0x800
#define ACCEL_SMALL_CACHE_SIZE		128
#define ACCEL_LARGE_CACHE_SIZE		16
/* Set MSB, so we don't return NULL pointers as buffers */
#define ACCEL_BUFFER_BASE		((void *)(1ull << 63))
#define ACCEL_BUFFER_OFFSET_MASK	((uintptr_t)ACCEL_BUFFER_BASE - 1)

#define ACCEL_CRYPTO_TWEAK_MODE_DEFAULT	SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA

struct accel_module {
	struct spdk_accel_module_if	*module;
	bool				supports_memory_domains;
};

/* Largest context size for all accel modules */
static size_t g_max_accel_module_size = sizeof(struct spdk_accel_task);

static struct spdk_accel_module_if *g_accel_module = NULL;
static spdk_accel_fini_cb g_fini_cb_fn = NULL;
static void *g_fini_cb_arg = NULL;
static bool g_modules_started = false;
static struct spdk_memory_domain *g_accel_domain;

/* Global list of registered accelerator modules */
static TAILQ_HEAD(, spdk_accel_module_if) spdk_accel_module_list =
	TAILQ_HEAD_INITIALIZER(spdk_accel_module_list);

/* Crypto keyring */
static TAILQ_HEAD(, spdk_accel_crypto_key) g_keyring = TAILQ_HEAD_INITIALIZER(g_keyring);
static struct spdk_spinlock g_keyring_spin;

/* Global array mapping capabilities to modules */
static struct accel_module g_modules_opc[SPDK_ACCEL_OPC_LAST] = {};
static char *g_modules_opc_override[SPDK_ACCEL_OPC_LAST] = {};
TAILQ_HEAD(, spdk_accel_driver) g_accel_drivers = TAILQ_HEAD_INITIALIZER(g_accel_drivers);
static struct spdk_accel_driver *g_accel_driver;
static struct spdk_accel_opts g_opts = {
	.small_cache_size = ACCEL_SMALL_CACHE_SIZE,
	.large_cache_size = ACCEL_LARGE_CACHE_SIZE,
	.task_count = MAX_TASKS_PER_CHANNEL,
	.sequence_count = MAX_TASKS_PER_CHANNEL,
	.buf_count = MAX_TASKS_PER_CHANNEL,
};
static struct accel_stats g_stats;
static struct spdk_spinlock g_stats_lock;

static const char *g_opcode_strings[SPDK_ACCEL_OPC_LAST] = {
	"copy", "fill", "dualcast", "compare", "crc32c", "copy_crc32c",
	"compress", "decompress", "encrypt", "decrypt", "xor",
	"dif_verify", "dif_verify_copy", "dif_generate", "dif_generate_copy"
};

enum accel_sequence_state {
	ACCEL_SEQUENCE_STATE_INIT,
	ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF,
	ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF,
	ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF,
	ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF,
	ACCEL_SEQUENCE_STATE_PULL_DATA,
	ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA,
	ACCEL_SEQUENCE_STATE_EXEC_TASK,
	ACCEL_SEQUENCE_STATE_AWAIT_TASK,
	ACCEL_SEQUENCE_STATE_COMPLETE_TASK,
	ACCEL_SEQUENCE_STATE_NEXT_TASK,
	ACCEL_SEQUENCE_STATE_PUSH_DATA,
	ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA,
	ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS,
	ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS,
	ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS,
	ACCEL_SEQUENCE_STATE_ERROR,
	ACCEL_SEQUENCE_STATE_MAX,
};

static const char *g_seq_states[]
__attribute__((unused)) = {
	[ACCEL_SEQUENCE_STATE_INIT] = "init",
	[ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF] = "check-virtbuf",
	[ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF] = "await-virtbuf",
	[ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF] = "check-bouncebuf",
	[ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF] = "await-bouncebuf",
	[ACCEL_SEQUENCE_STATE_PULL_DATA] = "pull-data",
	[ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA] = "await-pull-data",
	[ACCEL_SEQUENCE_STATE_EXEC_TASK] = "exec-task",
	[ACCEL_SEQUENCE_STATE_AWAIT_TASK] = "await-task",
	[ACCEL_SEQUENCE_STATE_COMPLETE_TASK] = "complete-task",
	[ACCEL_SEQUENCE_STATE_NEXT_TASK] = "next-task",
	[ACCEL_SEQUENCE_STATE_PUSH_DATA] = "push-data",
	[ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA] = "await-push-data",
	[ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS] = "driver-exec-tasks",
	[ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS] = "driver-await-tasks",
	[ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS] = "driver-complete-tasks",
	[ACCEL_SEQUENCE_STATE_ERROR] = "error",
	[ACCEL_SEQUENCE_STATE_MAX] = "",
};

#define ACCEL_SEQUENCE_STATE_STRING(s) \
	(((s) >= ACCEL_SEQUENCE_STATE_INIT && (s) < ACCEL_SEQUENCE_STATE_MAX) \
	 ? g_seq_states[s] : "unknown")
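
/*
 * Normal flow of a task through the states above (driver-less path), as
 * implemented by accel_process_sequence() later in this file:
 *
 *	INIT -> CHECK_VIRTBUF -> CHECK_BOUNCEBUF -> [PULL_DATA] -> EXEC_TASK ->
 *	AWAIT_TASK -> COMPLETE_TASK -> [PUSH_DATA] -> NEXT_TASK -> INIT ...
 *
 * The bracketed states are only entered when bounce buffers are in use.  When
 * a platform driver is registered, INIT branches to the DRIVER_* states
 * instead.  The AWAIT_* states mark the points where processing is suspended
 * until an asynchronous event completes (buffer allocation, memory domain
 * transfer, or task execution).
 */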
struct accel_buffer {
	struct spdk_accel_sequence	*seq;
	void				*buf;
	uint64_t			len;
	struct spdk_iobuf_entry		iobuf;
	spdk_accel_sequence_get_buf_cb	cb_fn;
	void				*cb_ctx;
	SLIST_ENTRY(accel_buffer)	link;
	struct accel_io_channel		*ch;
};

struct accel_io_channel {
	struct spdk_io_channel			*module_ch[SPDK_ACCEL_OPC_LAST];
	struct spdk_io_channel			*driver_channel;
	void					*task_pool_base;
	struct spdk_accel_sequence		*seq_pool_base;
	struct accel_buffer			*buf_pool_base;
	struct spdk_accel_task_aux_data		*task_aux_data_base;
	STAILQ_HEAD(, spdk_accel_task)		task_pool;
	SLIST_HEAD(, spdk_accel_task_aux_data)	task_aux_data_pool;
	SLIST_HEAD(, spdk_accel_sequence)	seq_pool;
	SLIST_HEAD(, accel_buffer)		buf_pool;
	struct spdk_iobuf_channel		iobuf;
	struct accel_stats			stats;
};

TAILQ_HEAD(accel_sequence_tasks, spdk_accel_task);

struct spdk_accel_sequence {
	struct accel_io_channel			*ch;
	struct accel_sequence_tasks		tasks;
	SLIST_HEAD(, accel_buffer)		bounce_bufs;
	int					status;
	/* state uses enum accel_sequence_state */
	uint8_t					state;
	bool					in_process_sequence;
	spdk_accel_completion_cb		cb_fn;
	void					*cb_arg;
	SLIST_ENTRY(spdk_accel_sequence)	link;
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_accel_sequence) == 64, "invalid size");

#define accel_update_stats(ch, event, v) \
	do { \
		(ch)->stats.event += (v); \
	} while (0)

#define accel_update_task_stats(ch, task, event, v) \
	accel_update_stats(ch, operations[(task)->op_code].event, v)

static inline void accel_sequence_task_cb(void *cb_arg, int status);

static inline void
accel_sequence_set_state(struct spdk_accel_sequence *seq, enum accel_sequence_state state)
{
	SPDK_DEBUGLOG(accel, "seq=%p, setting state: %s -> %s\n", seq,
		      ACCEL_SEQUENCE_STATE_STRING(seq->state), ACCEL_SEQUENCE_STATE_STRING(state));
	assert(seq->state != ACCEL_SEQUENCE_STATE_ERROR || state == ACCEL_SEQUENCE_STATE_ERROR);
	seq->state = state;
}

static void
accel_sequence_set_fail(struct spdk_accel_sequence *seq, int status)
{
	accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
	assert(status != 0);
	seq->status = status;
}

int
spdk_accel_get_opc_module_name(enum spdk_accel_opcode opcode, const char **module_name)
{
	if (opcode >= SPDK_ACCEL_OPC_LAST) {
		/* invalid opcode */
		return -EINVAL;
	}

	if (g_modules_opc[opcode].module) {
		*module_name = g_modules_opc[opcode].module->name;
	} else {
		return -ENOENT;
	}

	return 0;
}

void
_accel_for_each_module(struct module_info *info, _accel_for_each_module_fn fn)
{
	struct spdk_accel_module_if *accel_module;
	enum spdk_accel_opcode opcode;
	int j = 0;

	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		for (opcode = 0; opcode < SPDK_ACCEL_OPC_LAST; opcode++) {
			if (accel_module->supports_opcode(opcode)) {
				info->ops[j] = opcode;
				j++;
			}
		}
		info->name = accel_module->name;
		info->num_ops = j;
		fn(info);
		j = 0;
	}
}

const char *
spdk_accel_get_opcode_name(enum spdk_accel_opcode opcode)
{
	if (opcode < SPDK_ACCEL_OPC_LAST) {
		return g_opcode_strings[opcode];
	}

	return NULL;
}

int
spdk_accel_assign_opc(enum spdk_accel_opcode opcode, const char *name)
{
	char *copy;

	if (g_modules_started == true) {
		/* we don't allow re-assignment once things have started */
		return -EINVAL;
	}

	if (opcode >= SPDK_ACCEL_OPC_LAST) {
		/* invalid opcode */
		return -EINVAL;
	}

	copy = strdup(name);
	if (copy == NULL) {
		return -ENOMEM;
	}

	/* module selection will be validated after the framework starts. */
	free(g_modules_opc_override[opcode]);
	g_modules_opc_override[opcode] = copy;

	return 0;
}
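
/*
 * Illustrative sketch (not part of this file): overriding the module used for
 * a single opcode from an application's initialization path, before the
 * framework starts.  The module name "ioat" is only an example; any registered
 * module name is accepted here and validated once the framework starts.
 *
 *	int rc = spdk_accel_assign_opc(SPDK_ACCEL_OPC_COPY, "ioat");
 *	if (rc != 0) {
 *		// -EINVAL: framework already started or invalid opcode
 *	}
 */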
inline static struct spdk_accel_task *
_get_task(struct accel_io_channel *accel_ch, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct spdk_accel_task *accel_task;

	accel_task = STAILQ_FIRST(&accel_ch->task_pool);
	if (spdk_unlikely(accel_task == NULL)) {
		accel_update_stats(accel_ch, retry.task, 1);
		return NULL;
	}

	STAILQ_REMOVE_HEAD(&accel_ch->task_pool, link);
	accel_task->link.stqe_next = NULL;

	accel_task->cb_fn = cb_fn;
	accel_task->cb_arg = cb_arg;
	accel_task->accel_ch = accel_ch;
	accel_task->s.iovs = NULL;
	accel_task->d.iovs = NULL;

	return accel_task;
}

void
spdk_accel_task_complete(struct spdk_accel_task *accel_task, int status)
{
	struct accel_io_channel *accel_ch = accel_task->accel_ch;
	spdk_accel_completion_cb cb_fn;
	void *cb_arg;

	accel_update_task_stats(accel_ch, accel_task, executed, 1);
	accel_update_task_stats(accel_ch, accel_task, num_bytes, accel_task->nbytes);
	if (spdk_unlikely(status != 0)) {
		accel_update_task_stats(accel_ch, accel_task, failed, 1);
	}

	if (accel_task->seq) {
		accel_sequence_task_cb(accel_task->seq, status);
		return;
	}

	cb_fn = accel_task->cb_fn;
	cb_arg = accel_task->cb_arg;

	if (accel_task->has_aux) {
		SLIST_INSERT_HEAD(&accel_ch->task_aux_data_pool, accel_task->aux, link);
		accel_task->aux = NULL;
		accel_task->has_aux = false;
	}

	/* Return the task to the pool before invoking the callback, so the pool
	 * isn't exhausted if the user's callback (cb_fn) recursively allocates
	 * another accel_task.
	 */
	STAILQ_INSERT_HEAD(&accel_ch->task_pool, accel_task, link);

	cb_fn(cb_arg, status);
}

static inline int
accel_submit_task(struct accel_io_channel *accel_ch, struct spdk_accel_task *task)
{
	struct spdk_io_channel *module_ch = accel_ch->module_ch[task->op_code];
	struct spdk_accel_module_if *module = g_modules_opc[task->op_code].module;
	int rc;

	rc = module->submit_tasks(module_ch, task);
	if (spdk_unlikely(rc != 0)) {
		accel_update_task_stats(accel_ch, task, failed, 1);
	}

	return rc;
}

static inline uint64_t
accel_get_iovlen(struct iovec *iovs, uint32_t iovcnt)
{
	uint64_t result = 0;
	uint32_t i;

	for (i = 0; i < iovcnt; ++i) {
		result += iovs[i].iov_len;
	}

	return result;
}

#define ACCEL_TASK_ALLOC_AUX_BUF(task)						\
do {										\
	(task)->aux = SLIST_FIRST(&(task)->accel_ch->task_aux_data_pool);	\
	if (spdk_unlikely(!(task)->aux)) {					\
		SPDK_ERRLOG("Fatal problem, aux data was not allocated\n");	\
		STAILQ_INSERT_HEAD(&(task)->accel_ch->task_pool, (task), link);	\
		assert(0);							\
		return -ENOMEM;							\
	}									\
	SLIST_REMOVE_HEAD(&(task)->accel_ch->task_aux_data_pool, link);		\
	(task)->has_aux = true;							\
} while (0)
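
/*
 * Illustrative sketch (not part of this file): a module's submit_tasks()
 * implementation hands each finished task back to the framework by calling
 * spdk_accel_task_complete() above, either synchronously or later from a
 * poller once the hardware is done.  sw_submit_tasks/sw_do_copy are
 * hypothetical names.
 *
 *	static int
 *	sw_submit_tasks(struct spdk_io_channel *ch, struct spdk_accel_task *task)
 *	{
 *		int rc = sw_do_copy(task);	// perform the operation
 *
 *		// the framework either resumes the owning sequence or invokes
 *		// the user's completion callback
 *		spdk_accel_task_complete(task, rc);
 *		return 0;
 *	}
 */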
/* Accel framework public API for copy function */
int
spdk_accel_submit_copy(struct spdk_io_channel *ch, void *dst, void *src,
		       uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->op_code = SPDK_ACCEL_OPC_COPY;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for dual cast copy function */
int
spdk_accel_submit_dualcast(struct spdk_io_channel *ch, void *dst1,
			   void *dst2, void *src, uint64_t nbytes,
			   spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	if ((uintptr_t)dst1 & (ALIGN_4K - 1) || (uintptr_t)dst2 & (ALIGN_4K - 1)) {
		SPDK_ERRLOG("Dualcast requires 4K alignment on dst addresses\n");
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d2.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST2];
	accel_task->d.iovs[0].iov_base = dst1;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->d2.iovs[0].iov_base = dst2;
	accel_task->d2.iovs[0].iov_len = nbytes;
	accel_task->d2.iovcnt = 1;
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->op_code = SPDK_ACCEL_OPC_DUALCAST;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for compare function */
int
spdk_accel_submit_compare(struct spdk_io_channel *ch, void *src1,
			  void *src2, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
			  void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->s2.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC2];
	accel_task->s.iovs[0].iov_base = src1;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->s2.iovs[0].iov_base = src2;
	accel_task->s2.iovs[0].iov_len = nbytes;
	accel_task->s2.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->op_code = SPDK_ACCEL_OPC_COMPARE;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;

	return accel_submit_task(accel_ch, accel_task);
}
/* Accel framework public API for fill function */
int
spdk_accel_submit_fill(struct spdk_io_channel *ch, void *dst,
		       uint8_t fill, uint64_t nbytes,
		       spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->nbytes = nbytes;
	memset(&accel_task->fill_pattern, fill, sizeof(uint64_t));
	accel_task->op_code = SPDK_ACCEL_OPC_FILL;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for CRC-32C function */
int
spdk_accel_submit_crc32c(struct spdk_io_channel *ch, uint32_t *crc_dst,
			 void *src, uint32_t seed, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
			 void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->op_code = SPDK_ACCEL_OPC_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for chained CRC-32C function */
int
spdk_accel_submit_crc32cv(struct spdk_io_channel *ch, uint32_t *crc_dst,
			  struct iovec *iov, uint32_t iov_cnt, uint32_t seed,
			  spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	if (iov == NULL) {
		SPDK_ERRLOG("iov should not be NULL\n");
		return -EINVAL;
	}

	if (!iov_cnt) {
		SPDK_ERRLOG("iovcnt should not be zero value\n");
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		SPDK_ERRLOG("no memory\n");
		assert(0);
		return -ENOMEM;
	}

	accel_task->s.iovs = iov;
	accel_task->s.iovcnt = iov_cnt;
	accel_task->nbytes = accel_get_iovlen(iov, iov_cnt);
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->op_code = SPDK_ACCEL_OPC_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;

	return accel_submit_task(accel_ch, accel_task);
}
/* Accel framework public API for copy with CRC-32C function */
int
spdk_accel_submit_copy_crc32c(struct spdk_io_channel *ch, void *dst,
			      void *src, uint32_t *crc_dst, uint32_t seed, uint64_t nbytes,
			      spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->op_code = SPDK_ACCEL_OPC_COPY_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for chained copy + CRC-32C function */
int
spdk_accel_submit_copy_crc32cv(struct spdk_io_channel *ch, void *dst,
			       struct iovec *src_iovs, uint32_t iov_cnt, uint32_t *crc_dst,
			       uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	uint64_t nbytes;

	if (src_iovs == NULL) {
		SPDK_ERRLOG("iov should not be NULL\n");
		return -EINVAL;
	}

	if (!iov_cnt) {
		SPDK_ERRLOG("iovcnt should not be zero value\n");
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		SPDK_ERRLOG("no memory\n");
		assert(0);
		return -ENOMEM;
	}

	nbytes = accel_get_iovlen(src_iovs, iov_cnt);

	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = iov_cnt;
	accel_task->nbytes = nbytes;
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->op_code = SPDK_ACCEL_OPC_COPY_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_compress(struct spdk_io_channel *ch, void *dst, uint64_t nbytes,
			   struct iovec *src_iovs, size_t src_iovcnt, uint32_t *output_size,
			   spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->output_size = output_size;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->nbytes = nbytes;
	accel_task->op_code = SPDK_ACCEL_OPC_COMPRESS;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_decompress(struct spdk_io_channel *ch, struct iovec *dst_iovs,
			     size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
			     uint32_t *output_size, spdk_accel_completion_cb cb_fn,
			     void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->output_size = output_size;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	accel_task->op_code = SPDK_ACCEL_OPC_DECOMPRESS;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;

	return accel_submit_task(accel_ch, accel_task);
}
int
spdk_accel_submit_encrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  uint64_t iv, uint32_t block_size,
			  spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->crypto_key = key;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	accel_task->iv = iv;
	accel_task->block_size = block_size;
	accel_task->op_code = SPDK_ACCEL_OPC_ENCRYPT;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_decrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  uint64_t iv, uint32_t block_size,
			  spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->crypto_key = key;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	accel_task->iv = iv;
	accel_task->block_size = block_size;
	accel_task->op_code = SPDK_ACCEL_OPC_DECRYPT;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_xor(struct spdk_io_channel *ch, void *dst, void **sources, uint32_t nsrcs,
		      uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->nsrcs.srcs = sources;
	accel_task->nsrcs.cnt = nsrcs;
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->op_code = SPDK_ACCEL_OPC_XOR;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;

	return accel_submit_task(accel_ch, accel_task);
}
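
/*
 * Illustrative sketch (not part of this file): encrypting one logical block in
 * place.  The key would normally come from spdk_accel_crypto_key_create()
 * (declared in the public accel headers); the iv is typically the LBA and
 * block_size the LBA size.  All names other than the API call itself are
 * hypothetical.
 *
 *	struct iovec iov = { .iov_base = data, .iov_len = 4096 };
 *
 *	// iv = lba, block_size = 4096
 *	rc = spdk_accel_submit_encrypt(ch, key, &iov, 1, &iov, 1,
 *				       lba, 4096, encrypt_done, NULL);
 */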
int
spdk_accel_submit_dif_verify(struct spdk_io_channel *ch,
			     struct iovec *iovs, size_t iovcnt, uint32_t num_blocks,
			     const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err,
			     spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->s.iovs = iovs;
	accel_task->s.iovcnt = iovcnt;
	accel_task->dif.ctx = ctx;
	accel_task->dif.err = err;
	accel_task->dif.num_blocks = num_blocks;
	accel_task->nbytes = num_blocks * ctx->block_size;
	accel_task->op_code = SPDK_ACCEL_OPC_DIF_VERIFY;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_dif_generate(struct spdk_io_channel *ch,
			       struct iovec *iovs, size_t iovcnt, uint32_t num_blocks,
			       const struct spdk_dif_ctx *ctx,
			       spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->s.iovs = iovs;
	accel_task->s.iovcnt = iovcnt;
	accel_task->dif.ctx = ctx;
	accel_task->dif.num_blocks = num_blocks;
	accel_task->nbytes = num_blocks * ctx->block_size;
	accel_task->op_code = SPDK_ACCEL_OPC_DIF_GENERATE;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_dif_generate_copy(struct spdk_io_channel *ch, struct iovec *dst_iovs,
				    size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
				    uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
				    spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->dif.ctx = ctx;
	accel_task->dif.num_blocks = num_blocks;
	accel_task->nbytes = num_blocks * ctx->block_size;
	accel_task->op_code = SPDK_ACCEL_OPC_DIF_GENERATE_COPY;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_dif_verify_copy(struct spdk_io_channel *ch,
				  struct iovec *dst_iovs, size_t dst_iovcnt,
				  struct iovec *src_iovs, size_t src_iovcnt, uint32_t num_blocks,
				  const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err,
				  spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->dif.ctx = ctx;
	accel_task->dif.err = err;
	accel_task->dif.num_blocks = num_blocks;
	accel_task->nbytes = num_blocks * ctx->block_size;
	accel_task->op_code = SPDK_ACCEL_OPC_DIF_VERIFY_COPY;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;

	return accel_submit_task(accel_ch, accel_task);
}
static inline struct accel_buffer *
accel_get_buf(struct accel_io_channel *ch, uint64_t len)
{
	struct accel_buffer *buf;

	buf = SLIST_FIRST(&ch->buf_pool);
	if (spdk_unlikely(buf == NULL)) {
		accel_update_stats(ch, retry.bufdesc, 1);
		return NULL;
	}

	SLIST_REMOVE_HEAD(&ch->buf_pool, link);
	buf->len = len;
	buf->buf = NULL;
	buf->seq = NULL;
	buf->cb_fn = NULL;

	return buf;
}

static inline void
accel_put_buf(struct accel_io_channel *ch, struct accel_buffer *buf)
{
	if (buf->buf != NULL) {
		spdk_iobuf_put(&ch->iobuf, buf->buf, buf->len);
	}

	SLIST_INSERT_HEAD(&ch->buf_pool, buf, link);
}

static inline struct spdk_accel_sequence *
accel_sequence_get(struct accel_io_channel *ch)
{
	struct spdk_accel_sequence *seq;

	seq = SLIST_FIRST(&ch->seq_pool);
	if (spdk_unlikely(seq == NULL)) {
		accel_update_stats(ch, retry.sequence, 1);
		return NULL;
	}

	SLIST_REMOVE_HEAD(&ch->seq_pool, link);

	TAILQ_INIT(&seq->tasks);
	SLIST_INIT(&seq->bounce_bufs);

	seq->ch = ch;
	seq->status = 0;
	seq->state = ACCEL_SEQUENCE_STATE_INIT;
	seq->in_process_sequence = false;

	return seq;
}

static inline void
accel_sequence_put(struct spdk_accel_sequence *seq)
{
	struct accel_io_channel *ch = seq->ch;
	struct accel_buffer *buf;

	while (!SLIST_EMPTY(&seq->bounce_bufs)) {
		buf = SLIST_FIRST(&seq->bounce_bufs);
		SLIST_REMOVE_HEAD(&seq->bounce_bufs, link);
		accel_put_buf(seq->ch, buf);
	}

	assert(TAILQ_EMPTY(&seq->tasks));
	seq->ch = NULL;

	SLIST_INSERT_HEAD(&ch->seq_pool, seq, link);
}

static void accel_sequence_task_cb(void *cb_arg, int status);

static inline struct spdk_accel_task *
accel_sequence_get_task(struct accel_io_channel *ch, struct spdk_accel_sequence *seq,
			spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct spdk_accel_task *task;

	task = _get_task(ch, NULL, NULL);
	if (spdk_unlikely(task == NULL)) {
		return task;
	}

	task->step_cb_fn = cb_fn;
	task->cb_arg = cb_arg;
	task->seq = seq;

	return task;
}
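
/*
 * Illustrative sketch (not part of this file): building an operation chain
 * with the append API below.  spdk_accel_sequence_finish() is part of the
 * public accel API (not shown in this file); it executes the whole chain and
 * invokes seq_done once every step has completed.
 *
 *	struct spdk_accel_sequence *seq = NULL;
 *
 *	rc = spdk_accel_append_fill(&seq, ch, buf, len, NULL, NULL, 0, NULL, NULL);
 *	rc = spdk_accel_append_copy(&seq, ch, &dst_iov, 1, NULL, NULL,
 *				    &src_iov, 1, NULL, NULL, NULL, NULL);
 *	spdk_accel_sequence_finish(seq, seq_done, NULL);
 */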
int
spdk_accel_append_copy(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
		       struct iovec *dst_iovs, uint32_t dst_iovcnt,
		       struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
		       struct iovec *src_iovs, uint32_t src_iovcnt,
		       struct spdk_memory_domain *src_domain, void *src_domain_ctx,
		       spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	task->op_code = SPDK_ACCEL_OPC_COPY;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_append_fill(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
		       void *buf, uint64_t len,
		       struct spdk_memory_domain *domain, void *domain_ctx, uint8_t pattern,
		       spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	memset(&task->fill_pattern, pattern, sizeof(uint64_t));

	task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
	if (spdk_unlikely(!task->aux)) {
		SPDK_ERRLOG("Fatal problem, aux data was not allocated\n");
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}
		STAILQ_INSERT_HEAD(&task->accel_ch->task_pool, task, link);
		task->seq = NULL;
		assert(0);
		return -ENOMEM;
	}
	SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
	task->has_aux = true;

	task->d.iovs = &task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
	task->d.iovs[0].iov_base = buf;
	task->d.iovs[0].iov_len = len;
	task->d.iovcnt = 1;
	task->nbytes = len;
	task->src_domain = NULL;
	task->dst_domain = domain;
	task->dst_domain_ctx = domain_ctx;
	task->op_code = SPDK_ACCEL_OPC_FILL;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_append_decompress(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
			     struct iovec *dst_iovs, size_t dst_iovcnt,
			     struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			     struct iovec *src_iovs, size_t src_iovcnt,
			     struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			     spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	/* TODO: support output_size for chaining */
	task->output_size = NULL;
	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	task->op_code = SPDK_ACCEL_OPC_DECOMPRESS;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}
int
spdk_accel_append_encrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
			  struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			  uint64_t iv, uint32_t block_size,
			  spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	assert(dst_iovs && dst_iovcnt && src_iovs && src_iovcnt && key && block_size);

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	task->crypto_key = key;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	task->iv = iv;
	task->block_size = block_size;
	task->op_code = SPDK_ACCEL_OPC_ENCRYPT;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_append_decrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
			  struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			  uint64_t iv, uint32_t block_size,
			  spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	assert(dst_iovs && dst_iovcnt && src_iovs && src_iovcnt && key && block_size);

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	task->crypto_key = key;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	task->iv = iv;
	task->block_size = block_size;
	task->op_code = SPDK_ACCEL_OPC_DECRYPT;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}
int
spdk_accel_append_crc32c(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
			 uint32_t *dst, struct iovec *iovs, uint32_t iovcnt,
			 struct spdk_memory_domain *domain, void *domain_ctx,
			 uint32_t seed, spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	task->s.iovs = iovs;
	task->s.iovcnt = iovcnt;
	task->src_domain = domain;
	task->src_domain_ctx = domain_ctx;
	task->nbytes = accel_get_iovlen(iovs, iovcnt);
	task->crc_dst = dst;
	task->seed = seed;
	task->op_code = SPDK_ACCEL_OPC_CRC32C;
	task->dst_domain = NULL;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_get_buf(struct spdk_io_channel *ch, uint64_t len, void **buf,
		   struct spdk_memory_domain **domain, void **domain_ctx)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct accel_buffer *accel_buf;

	accel_buf = accel_get_buf(accel_ch, len);
	if (spdk_unlikely(accel_buf == NULL)) {
		return -ENOMEM;
	}

	accel_buf->ch = accel_ch;

	/* We always return the same pointer and identify the buffers through domain_ctx */
	*buf = ACCEL_BUFFER_BASE;
	*domain_ctx = accel_buf;
	*domain = g_accel_domain;

	return 0;
}

void
spdk_accel_put_buf(struct spdk_io_channel *ch, void *buf,
		   struct spdk_memory_domain *domain, void *domain_ctx)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct accel_buffer *accel_buf = domain_ctx;

	assert(domain == g_accel_domain);
	assert(buf == ACCEL_BUFFER_BASE);

	accel_put_buf(accel_ch, accel_buf);
}
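
/*
 * Illustrative sketch (not part of this file): using an accel buffer as an
 * intermediate destination in a chain.  The returned pointer is the opaque
 * ACCEL_BUFFER_BASE sentinel and is only meaningful together with the returned
 * memory domain and domain_ctx; the real memory is allocated lazily when the
 * sequence executes.
 *
 *	void *buf;
 *	struct spdk_memory_domain *domain;
 *	void *domain_ctx;
 *
 *	rc = spdk_accel_get_buf(ch, len, &buf, &domain, &domain_ctx);
 *	// ... pass (buf, domain, domain_ctx) to the append APIs above ...
 *	spdk_accel_put_buf(ch, buf, domain, domain_ctx);
 */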
static void
accel_sequence_complete_task(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	struct accel_io_channel *ch = seq->ch;
	spdk_accel_step_cb cb_fn;
	void *cb_arg;

	TAILQ_REMOVE(&seq->tasks, task, seq_link);
	cb_fn = task->step_cb_fn;
	cb_arg = task->cb_arg;
	task->seq = NULL;
	if (task->has_aux) {
		SLIST_INSERT_HEAD(&ch->task_aux_data_pool, task->aux, link);
		task->aux = NULL;
		task->has_aux = false;
	}
	STAILQ_INSERT_HEAD(&ch->task_pool, task, link);
	if (cb_fn != NULL) {
		cb_fn(cb_arg);
	}
}

static void
accel_sequence_complete_tasks(struct spdk_accel_sequence *seq)
{
	struct spdk_accel_task *task;

	while (!TAILQ_EMPTY(&seq->tasks)) {
		task = TAILQ_FIRST(&seq->tasks);
		accel_sequence_complete_task(seq, task);
	}
}

static void
accel_sequence_complete(struct spdk_accel_sequence *seq)
{
	spdk_accel_completion_cb cb_fn = seq->cb_fn;
	void *cb_arg = seq->cb_arg;
	int status = seq->status;

	SPDK_DEBUGLOG(accel, "Completed sequence: %p with status: %d\n", seq, status);

	accel_update_stats(seq->ch, sequence_executed, 1);
	if (spdk_unlikely(status != 0)) {
		accel_update_stats(seq->ch, sequence_failed, 1);
	}

	/* First notify all users who appended operations to this sequence */
	accel_sequence_complete_tasks(seq);
	accel_sequence_put(seq);

	/* Then notify the user who finished the sequence */
	cb_fn(cb_arg, status);
}

static void
accel_update_virt_iov(struct iovec *diov, struct iovec *siov, struct accel_buffer *accel_buf)
{
	uintptr_t offset;

	offset = (uintptr_t)siov->iov_base & ACCEL_BUFFER_OFFSET_MASK;
	assert(offset < accel_buf->len);

	diov->iov_base = (char *)accel_buf->buf + offset;
	diov->iov_len = siov->iov_len;
}

static void
accel_sequence_set_virtbuf(struct spdk_accel_sequence *seq, struct accel_buffer *buf)
{
	struct spdk_accel_task *task;
	struct iovec *iov;

	/* Now that we've allocated the actual data buffer for this accel_buffer, update all tasks
	 * in a sequence that were using it.
	 */
	TAILQ_FOREACH(task, &seq->tasks, seq_link) {
		if (task->src_domain == g_accel_domain && task->src_domain_ctx == buf) {
			if (!task->has_aux) {
				task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
				assert(task->aux && "Can't allocate aux data structure");
				task->has_aux = true;
				SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
			}

			iov = &task->aux->iovs[SPDK_ACCEL_AXU_IOV_VIRT_SRC];
			assert(task->s.iovcnt == 1);
			accel_update_virt_iov(iov, &task->s.iovs[0], buf);
			task->src_domain = NULL;
			task->s.iovs = iov;
		}
		if (task->dst_domain == g_accel_domain && task->dst_domain_ctx == buf) {
			if (!task->has_aux) {
				task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
				assert(task->aux && "Can't allocate aux data structure");
				task->has_aux = true;
				SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
			}

			iov = &task->aux->iovs[SPDK_ACCEL_AXU_IOV_VIRT_DST];
			assert(task->d.iovcnt == 1);
			accel_update_virt_iov(iov, &task->d.iovs[0], buf);
			task->dst_domain = NULL;
			task->d.iovs = iov;
		}
	}
}

static void accel_process_sequence(struct spdk_accel_sequence *seq);

static void
accel_iobuf_get_virtbuf_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);

	assert(accel_buf->seq != NULL);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
	accel_process_sequence(accel_buf->seq);
}

static bool
accel_sequence_alloc_buf(struct spdk_accel_sequence *seq, struct accel_buffer *buf,
			 spdk_iobuf_get_cb cb_fn)
{
	struct accel_io_channel *ch = seq->ch;

	assert(buf->seq == NULL);

	buf->seq = seq;

	/* Buffer might be already allocated by memory domain translation. */
	if (buf->buf) {
		return true;
	}

	buf->buf = spdk_iobuf_get(&ch->iobuf, buf->len, &buf->iobuf, cb_fn);
	if (spdk_unlikely(buf->buf == NULL)) {
		accel_update_stats(ch, retry.iobuf, 1);
		return false;
	}

	return true;
}
static bool
accel_sequence_check_virtbuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	/* If a task doesn't have dst/src (e.g. fill, crc32), its dst/src domain should be set to
	 * NULL */
	if (task->src_domain == g_accel_domain) {
		if (!accel_sequence_alloc_buf(seq, task->src_domain_ctx,
					      accel_iobuf_get_virtbuf_cb)) {
			return false;
		}

		accel_sequence_set_virtbuf(seq, task->src_domain_ctx);
	}

	if (task->dst_domain == g_accel_domain) {
		if (!accel_sequence_alloc_buf(seq, task->dst_domain_ctx,
					      accel_iobuf_get_virtbuf_cb)) {
			return false;
		}

		accel_sequence_set_virtbuf(seq, task->dst_domain_ctx);
	}

	return true;
}

static void
accel_sequence_get_buf_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);

	assert(accel_buf->seq != NULL);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
	accel_buf->cb_fn(accel_buf->seq, accel_buf->cb_ctx);
}

bool
spdk_accel_alloc_sequence_buf(struct spdk_accel_sequence *seq, void *buf,
			      struct spdk_memory_domain *domain, void *domain_ctx,
			      spdk_accel_sequence_get_buf_cb cb_fn, void *cb_ctx)
{
	struct accel_buffer *accel_buf = domain_ctx;

	assert(domain == g_accel_domain);
	accel_buf->cb_fn = cb_fn;
	accel_buf->cb_ctx = cb_ctx;

	if (!accel_sequence_alloc_buf(seq, accel_buf, accel_sequence_get_buf_cb)) {
		return false;
	}

	accel_sequence_set_virtbuf(seq, accel_buf);

	return true;
}

struct spdk_accel_task *
spdk_accel_sequence_first_task(struct spdk_accel_sequence *seq)
{
	return TAILQ_FIRST(&seq->tasks);
}

struct spdk_accel_task *
spdk_accel_sequence_next_task(struct spdk_accel_task *task)
{
	return TAILQ_NEXT(task, seq_link);
}
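
/*
 * Illustrative sketch (not part of this file): a platform driver's
 * execute_sequence() callback walks the remaining tasks with the two
 * accessors above, executes the ones it can, and then hands control back to
 * the framework.  drv_can_handle/drv_execute are hypothetical names;
 * spdk_accel_sequence_continue() is part of the driver-facing accel API (not
 * shown in this file).
 *
 *	static int
 *	drv_execute_sequence(struct spdk_io_channel *ch, struct spdk_accel_sequence *seq)
 *	{
 *		struct spdk_accel_task *task;
 *
 *		for (task = spdk_accel_sequence_first_task(seq); task != NULL;
 *		     task = spdk_accel_sequence_next_task(task)) {
 *			if (!drv_can_handle(task)) {
 *				break;
 *			}
 *			drv_execute(ch, task);
 *		}
 *
 *		spdk_accel_sequence_continue(seq);
 *		return 0;
 *	}
 */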
static inline void
accel_set_bounce_buffer(struct spdk_accel_bounce_buffer *bounce, struct iovec **iovs,
			uint32_t *iovcnt, struct spdk_memory_domain **domain, void **domain_ctx,
			struct accel_buffer *buf)
{
	bounce->orig_iovs = *iovs;
	bounce->orig_iovcnt = *iovcnt;
	bounce->orig_domain = *domain;
	bounce->orig_domain_ctx = *domain_ctx;
	bounce->iov.iov_base = buf->buf;
	bounce->iov.iov_len = buf->len;

	*iovs = &bounce->iov;
	*iovcnt = 1;
	*domain = NULL;
}

static void
accel_iobuf_get_src_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct spdk_accel_task *task;
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	task = TAILQ_FIRST(&accel_buf->seq->tasks);
	assert(task != NULL);

	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
	assert(task->aux);
	assert(task->has_aux);
	accel_set_bounce_buffer(&task->aux->bounce.s, &task->s.iovs, &task->s.iovcnt, &task->src_domain,
				&task->src_domain_ctx, accel_buf);
	accel_process_sequence(accel_buf->seq);
}

static void
accel_iobuf_get_dst_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct spdk_accel_task *task;
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	task = TAILQ_FIRST(&accel_buf->seq->tasks);
	assert(task != NULL);

	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
	assert(task->aux);
	assert(task->has_aux);
	accel_set_bounce_buffer(&task->aux->bounce.d, &task->d.iovs, &task->d.iovcnt, &task->dst_domain,
				&task->dst_domain_ctx, accel_buf);
	accel_process_sequence(accel_buf->seq);
}

static int
accel_sequence_check_bouncebuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	struct accel_buffer *buf;

	if (task->src_domain != NULL) {
		/* By the time we're here, accel buffers should have been allocated */
		assert(task->src_domain != g_accel_domain);

		if (!task->has_aux) {
			task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
			if (spdk_unlikely(!task->aux)) {
				SPDK_ERRLOG("Can't allocate aux data structure\n");
				assert(0);
				return -EAGAIN;
			}
			task->has_aux = true;
			SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
		}
		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->s.iovs, task->s.iovcnt));
		if (buf == NULL) {
			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
			return -ENOMEM;
		}

		SLIST_INSERT_HEAD(&seq->bounce_bufs, buf, link);
		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_src_bounce_cb)) {
			return -EAGAIN;
		}

		accel_set_bounce_buffer(&task->aux->bounce.s, &task->s.iovs, &task->s.iovcnt,
					&task->src_domain, &task->src_domain_ctx, buf);
	}

	if (task->dst_domain != NULL) {
		/* By the time we're here, accel buffers should have been allocated */
		assert(task->dst_domain != g_accel_domain);

		if (!task->has_aux) {
			task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
			if (spdk_unlikely(!task->aux)) {
				SPDK_ERRLOG("Can't allocate aux data structure\n");
				assert(0);
				return -EAGAIN;
			}
			task->has_aux = true;
			SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
		}
		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->d.iovs, task->d.iovcnt));
		if (buf == NULL) {
			/* The src buffer will be released when a sequence is completed */
			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
			return -ENOMEM;
		}

		SLIST_INSERT_HEAD(&seq->bounce_bufs, buf, link);
		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_dst_bounce_cb)) {
			return -EAGAIN;
		}

		accel_set_bounce_buffer(&task->aux->bounce.d, &task->d.iovs, &task->d.iovcnt,
					&task->dst_domain, &task->dst_domain_ctx, buf);
	}

	return 0;
}
static void
accel_task_pull_data_cb(void *ctx, int status)
{
	struct spdk_accel_sequence *seq = ctx;

	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
	if (spdk_likely(status == 0)) {
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
	} else {
		accel_sequence_set_fail(seq, status);
	}

	accel_process_sequence(seq);
}

static void
accel_task_pull_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	int rc;

	assert(task->has_aux);
	assert(task->aux);
	assert(task->aux->bounce.s.orig_iovs != NULL);
	assert(task->aux->bounce.s.orig_domain != NULL);
	assert(task->aux->bounce.s.orig_domain != g_accel_domain);
	assert(!g_modules_opc[task->op_code].supports_memory_domains);

	rc = spdk_memory_domain_pull_data(task->aux->bounce.s.orig_domain,
					  task->aux->bounce.s.orig_domain_ctx,
					  task->aux->bounce.s.orig_iovs, task->aux->bounce.s.orig_iovcnt,
					  task->s.iovs, task->s.iovcnt,
					  accel_task_pull_data_cb, seq);
	if (spdk_unlikely(rc != 0)) {
		SPDK_ERRLOG("Failed to pull data from memory domain: %s, rc: %d\n",
			    spdk_memory_domain_get_dma_device_id(task->aux->bounce.s.orig_domain), rc);
		accel_sequence_set_fail(seq, rc);
	}
}

static void
accel_task_push_data_cb(void *ctx, int status)
{
	struct spdk_accel_sequence *seq = ctx;

	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
	if (spdk_likely(status == 0)) {
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
	} else {
		accel_sequence_set_fail(seq, status);
	}

	accel_process_sequence(seq);
}

static void
accel_task_push_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	int rc;

	assert(task->has_aux);
	assert(task->aux);
	assert(task->aux->bounce.d.orig_iovs != NULL);
	assert(task->aux->bounce.d.orig_domain != NULL);
	assert(task->aux->bounce.d.orig_domain != g_accel_domain);
	assert(!g_modules_opc[task->op_code].supports_memory_domains);

	rc = spdk_memory_domain_push_data(task->aux->bounce.d.orig_domain,
					  task->aux->bounce.d.orig_domain_ctx,
					  task->aux->bounce.d.orig_iovs, task->aux->bounce.d.orig_iovcnt,
					  task->d.iovs, task->d.iovcnt,
					  accel_task_push_data_cb, seq);
	if (spdk_unlikely(rc != 0)) {
		SPDK_ERRLOG("Failed to push data to memory domain: %s, rc: %d\n",
			    spdk_memory_domain_get_dma_device_id(task->aux->bounce.d.orig_domain), rc);
		accel_sequence_set_fail(seq, rc);
	}
}

static void
accel_process_sequence(struct spdk_accel_sequence *seq)
{
	struct accel_io_channel *accel_ch = seq->ch;
	struct spdk_accel_task *task;
	enum accel_sequence_state state;
	int rc;

	/* Prevent recursive calls to this function */
	if (spdk_unlikely(seq->in_process_sequence)) {
		return;
	}
	seq->in_process_sequence = true;

	task = TAILQ_FIRST(&seq->tasks);
	do {
		state = seq->state;
		switch (state) {
		case ACCEL_SEQUENCE_STATE_INIT:
			if (g_accel_driver != NULL) {
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS);
				break;
			}
		/* Fall through */
		case ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
			if (!accel_sequence_check_virtbuf(seq, task)) {
				/* We couldn't allocate a buffer, wait until one is available */
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
		/* Fall through */
static void
accel_process_sequence(struct spdk_accel_sequence *seq)
{
	struct accel_io_channel *accel_ch = seq->ch;
	struct spdk_accel_task *task;
	enum accel_sequence_state state;
	int rc;

	/* Prevent recursive calls to this function */
	if (spdk_unlikely(seq->in_process_sequence)) {
		return;
	}
	seq->in_process_sequence = true;

	task = TAILQ_FIRST(&seq->tasks);
	do {
		state = seq->state;
		switch (state) {
		case ACCEL_SEQUENCE_STATE_INIT:
			if (g_accel_driver != NULL) {
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS);
				break;
			}
		/* Fall through */
		case ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
			if (!accel_sequence_check_virtbuf(seq, task)) {
				/* We couldn't allocate a buffer, wait until one is available */
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
		/* Fall through */
		case ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF:
			/* If a module supports memory domains, we don't need to allocate bounce
			 * buffers */
			if (g_modules_opc[task->op_code].supports_memory_domains) {
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
			rc = accel_sequence_check_bouncebuf(seq, task);
			if (spdk_unlikely(rc != 0)) {
				/* We couldn't allocate a buffer, wait until one is available */
				if (rc == -EAGAIN) {
					break;
				}
				accel_sequence_set_fail(seq, rc);
				break;
			}
			if (task->has_aux && task->s.iovs == &task->aux->bounce.s.iov) {
				assert(task->aux->bounce.s.orig_iovs);
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PULL_DATA);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
		/* Fall through */
		case ACCEL_SEQUENCE_STATE_EXEC_TASK:
			SPDK_DEBUGLOG(accel, "Executing %s operation, sequence: %p\n",
				      g_opcode_strings[task->op_code], seq);

			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_TASK);
			rc = accel_submit_task(accel_ch, task);
			if (spdk_unlikely(rc != 0)) {
				SPDK_ERRLOG("Failed to submit %s operation, sequence: %p\n",
					    g_opcode_strings[task->op_code], seq);
				accel_sequence_set_fail(seq, rc);
			}
			break;
		case ACCEL_SEQUENCE_STATE_PULL_DATA:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
			accel_task_pull_data(seq, task);
			break;
		case ACCEL_SEQUENCE_STATE_COMPLETE_TASK:
			if (task->has_aux && task->d.iovs == &task->aux->bounce.d.iov) {
				assert(task->aux->bounce.d.orig_iovs);
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PUSH_DATA);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
			break;
		case ACCEL_SEQUENCE_STATE_PUSH_DATA:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
			accel_task_push_data(seq, task);
			break;
		case ACCEL_SEQUENCE_STATE_NEXT_TASK:
			accel_sequence_complete_task(seq, task);
			/* Check if there are any remaining tasks */
			task = TAILQ_FIRST(&seq->tasks);
			if (task == NULL) {
				/* Immediately return here to make sure we don't touch the sequence
				 * after it's completed */
				accel_sequence_complete(seq);
				return;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_INIT);
			break;
		case ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS:
			assert(!TAILQ_EMPTY(&seq->tasks));

			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS);
			rc = g_accel_driver->execute_sequence(accel_ch->driver_channel, seq);
			if (spdk_unlikely(rc != 0)) {
				SPDK_ERRLOG("Failed to execute sequence: %p using driver: %s\n",
					    seq, g_accel_driver->name);
				accel_sequence_set_fail(seq, rc);
			}
			break;
		case ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS:
			/* Get the task again, as the driver might have completed some tasks
			 * synchronously */
			task = TAILQ_FIRST(&seq->tasks);
			if (task == NULL) {
				/* Immediately return here to make sure we don't touch the sequence
				 * after it's completed */
				accel_sequence_complete(seq);
				return;
			}
			/* We don't want to execute the next task through the driver, so we
			 * explicitly omit the INIT state here */
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
			break;
		case ACCEL_SEQUENCE_STATE_ERROR:
			/* Immediately return here to make sure we don't touch the sequence
			 * after it's completed */
			assert(seq->status != 0);
			accel_sequence_complete(seq);
			return;
		case ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF:
		case ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF:
		case ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA:
		case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
		case ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA:
		case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS:
			break;
		default:
			assert(0 && "bad state");
			break;
		}
	} while (seq->state != state);

	seq->in_process_sequence = false;
}

static void
accel_sequence_task_cb(void *cb_arg, int status)
{
	struct spdk_accel_sequence *seq = cb_arg;
	struct spdk_accel_task *task = TAILQ_FIRST(&seq->tasks);

	switch (seq->state) {
	case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_COMPLETE_TASK);
		if (spdk_unlikely(status != 0)) {
			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p\n",
				    g_opcode_strings[task->op_code], seq);
			accel_sequence_set_fail(seq, status);
		}

		accel_process_sequence(seq);
		break;
	case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS:
		assert(g_accel_driver != NULL);
		/* Immediately remove the task from the outstanding list to make sure the next call
		 * to spdk_accel_sequence_first_task() doesn't return it */
		accel_sequence_complete_task(seq, task);
		if (spdk_unlikely(status != 0)) {
			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p through "
				    "driver: %s\n", g_opcode_strings[task->op_code], seq,
				    g_accel_driver->name);
			/* Update status without using accel_sequence_set_fail() to avoid changing
			 * seq's state to ERROR until driver calls spdk_accel_sequence_continue() */
			seq->status = status;
		}
		break;
	default:
		assert(0 && "bad state");
		break;
	}
}

void
spdk_accel_sequence_continue(struct spdk_accel_sequence *seq)
{
	assert(g_accel_driver != NULL);
	assert(seq->state == ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS);

	if (spdk_likely(seq->status == 0)) {
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS);
	} else {
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
	}

	accel_process_sequence(seq);
}
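/* Contract for platform drivers: execute_sequence() receives the sequence while it is in the
 * DRIVER_AWAIT_TASKS state, completes whatever prefix of tasks it can (each completion funnels
 * through accel_sequence_task_cb() above), and then must call spdk_accel_sequence_continue()
 * exactly once.  A minimal sketch of a driver's execute_sequence callback, assuming the
 * spdk_accel_sequence_first_task() iterator declared in spdk/accel_module.h (illustrative
 * only, error handling elided; example_hw_submit() is a hypothetical helper that completes
 * the task asynchronously):
 *
 *	static int
 *	example_driver_execute_sequence(struct spdk_io_channel *ch, struct spdk_accel_sequence *seq)
 *	{
 *		struct spdk_accel_task *task = spdk_accel_sequence_first_task(seq);
 *
 *		// Process only what the hardware supports; leave the rest to the framework.
 *		example_hw_submit(ch, task);
 *		return 0;
 *	}
 *
 * Once all tasks the driver chose to handle have completed, it calls
 * spdk_accel_sequence_continue(seq); any remaining tasks are then executed by the modules
 * assigned to their opcodes (see the DRIVER_COMPLETE_TASKS case above). */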
static bool
accel_compare_iovs(struct iovec *iova, uint32_t iovacnt, struct iovec *iovb, uint32_t iovbcnt)
{
	/* For now, just do a dumb check that the iovec arrays are exactly the same */
	if (iovacnt != iovbcnt) {
		return false;
	}

	return memcmp(iova, iovb, sizeof(*iova) * iovacnt) == 0;
}

static bool
accel_task_set_dstbuf(struct spdk_accel_task *task, struct spdk_accel_task *next)
{
	struct spdk_accel_task *prev;

	switch (task->op_code) {
	case SPDK_ACCEL_OPC_DECOMPRESS:
	case SPDK_ACCEL_OPC_FILL:
	case SPDK_ACCEL_OPC_ENCRYPT:
	case SPDK_ACCEL_OPC_DECRYPT:
		if (task->dst_domain != next->src_domain) {
			return false;
		}
		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
					next->s.iovs, next->s.iovcnt)) {
			return false;
		}
		task->d.iovs = next->d.iovs;
		task->d.iovcnt = next->d.iovcnt;
		task->dst_domain = next->dst_domain;
		task->dst_domain_ctx = next->dst_domain_ctx;
		break;
	case SPDK_ACCEL_OPC_CRC32C:
		/* crc32 is special, because it doesn't have a dst buffer */
		if (task->src_domain != next->src_domain) {
			return false;
		}
		if (!accel_compare_iovs(task->s.iovs, task->s.iovcnt,
					next->s.iovs, next->s.iovcnt)) {
			return false;
		}
		/* We can only change crc32's buffer if we can change the previous task's buffer */
		prev = TAILQ_PREV(task, accel_sequence_tasks, seq_link);
		if (prev == NULL) {
			return false;
		}
		if (!accel_task_set_dstbuf(prev, next)) {
			return false;
		}
		task->s.iovs = next->d.iovs;
		task->s.iovcnt = next->d.iovcnt;
		task->src_domain = next->dst_domain;
		task->src_domain_ctx = next->dst_domain_ctx;
		break;
	default:
		return false;
	}

	return true;
}

static void
accel_sequence_merge_tasks(struct spdk_accel_sequence *seq, struct spdk_accel_task *task,
			   struct spdk_accel_task **next_task)
{
	struct spdk_accel_task *next = *next_task;

	switch (task->op_code) {
	case SPDK_ACCEL_OPC_COPY:
		/* We only allow changing the src of operations that actually have a src, e.g. we
		 * never do it for fill.  Theoretically, it is possible, but we'd have to be
		 * careful to change the src of the operation after fill (which in turn could also
		 * be a fill).  So, for the sake of simplicity, skip this type of operation for
		 * now.
		 */
		if (next->op_code != SPDK_ACCEL_OPC_DECOMPRESS &&
		    next->op_code != SPDK_ACCEL_OPC_COPY &&
		    next->op_code != SPDK_ACCEL_OPC_ENCRYPT &&
		    next->op_code != SPDK_ACCEL_OPC_DECRYPT &&
		    next->op_code != SPDK_ACCEL_OPC_COPY_CRC32C) {
			break;
		}
		if (task->dst_domain != next->src_domain) {
			break;
		}
		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
					next->s.iovs, next->s.iovcnt)) {
			break;
		}
		next->s.iovs = task->s.iovs;
		next->s.iovcnt = task->s.iovcnt;
		next->src_domain = task->src_domain;
		next->src_domain_ctx = task->src_domain_ctx;
		accel_sequence_complete_task(seq, task);
		break;
	case SPDK_ACCEL_OPC_DECOMPRESS:
	case SPDK_ACCEL_OPC_FILL:
	case SPDK_ACCEL_OPC_ENCRYPT:
	case SPDK_ACCEL_OPC_DECRYPT:
	case SPDK_ACCEL_OPC_CRC32C:
		/* We can only merge tasks when one of them is a copy */
		if (next->op_code != SPDK_ACCEL_OPC_COPY) {
			break;
		}
		if (!accel_task_set_dstbuf(task, next)) {
			break;
		}
		/* We're removing next_task from the tasks queue, so we need to update its pointer,
		 * so that the TAILQ_FOREACH_SAFE() loop below works correctly */
		*next_task = TAILQ_NEXT(next, seq_link);
		accel_sequence_complete_task(seq, next);
		break;
	default:
		assert(0 && "bad opcode");
		break;
	}
}

void
spdk_accel_sequence_finish(struct spdk_accel_sequence *seq,
			   spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct spdk_accel_task *task, *next;

	/* Try to remove any copy operations if possible */
	TAILQ_FOREACH_SAFE(task, &seq->tasks, seq_link, next) {
		if (next == NULL) {
			break;
		}
		accel_sequence_merge_tasks(seq, task, &next);
	}

	seq->cb_fn = cb_fn;
	seq->cb_arg = cb_arg;

	accel_process_sequence(seq);
}
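/* Example of the copy elision performed above: for a sequence decompress(src -> tmp),
 * copy(tmp -> dst), accel_task_set_dstbuf() rewrites the decompress task to write directly
 * into dst and accel_sequence_merge_tasks() drops the copy, so only a single task is actually
 * executed.  The same works in the other direction: copy(src -> tmp), decompress(tmp -> dst)
 * becomes decompress(src -> dst).  The iovec comparison is intentionally strict
 * (accel_compare_iovs()), so the buffers must match element by element for a merge to
 * happen. */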
void
spdk_accel_sequence_reverse(struct spdk_accel_sequence *seq)
{
	struct accel_sequence_tasks tasks = TAILQ_HEAD_INITIALIZER(tasks);
	struct spdk_accel_task *task;

	TAILQ_SWAP(&tasks, &seq->tasks, spdk_accel_task, seq_link);

	while (!TAILQ_EMPTY(&tasks)) {
		task = TAILQ_FIRST(&tasks);
		TAILQ_REMOVE(&tasks, task, seq_link);
		TAILQ_INSERT_HEAD(&seq->tasks, task, seq_link);
	}
}

void
spdk_accel_sequence_abort(struct spdk_accel_sequence *seq)
{
	if (seq == NULL) {
		return;
	}

	accel_sequence_complete_tasks(seq);
	accel_sequence_put(seq);
}

struct spdk_memory_domain *
spdk_accel_get_memory_domain(void)
{
	return g_accel_domain;
}

static struct spdk_accel_module_if *
_module_find_by_name(const char *name)
{
	struct spdk_accel_module_if *accel_module = NULL;

	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		if (strcmp(name, accel_module->name) == 0) {
			break;
		}
	}

	return accel_module;
}

static inline struct spdk_accel_crypto_key *
_accel_crypto_key_get(const char *name)
{
	struct spdk_accel_crypto_key *key;

	assert(spdk_spin_held(&g_keyring_spin));

	TAILQ_FOREACH(key, &g_keyring, link) {
		if (strcmp(name, key->param.key_name) == 0) {
			return key;
		}
	}

	return NULL;
}

static void
accel_crypto_key_free_mem(struct spdk_accel_crypto_key *key)
{
	if (key->param.hex_key) {
		spdk_memset_s(key->param.hex_key, key->key_size * 2, 0, key->key_size * 2);
		free(key->param.hex_key);
	}
	if (key->param.hex_key2) {
		spdk_memset_s(key->param.hex_key2, key->key2_size * 2, 0, key->key2_size * 2);
		free(key->param.hex_key2);
	}
	free(key->param.tweak_mode);
	free(key->param.key_name);
	free(key->param.cipher);
	if (key->key) {
		spdk_memset_s(key->key, key->key_size, 0, key->key_size);
		free(key->key);
	}
	if (key->key2) {
		spdk_memset_s(key->key2, key->key2_size, 0, key->key2_size);
		free(key->key2);
	}
	free(key);
}

static void
accel_crypto_key_destroy_unsafe(struct spdk_accel_crypto_key *key)
{
	assert(key->module_if);
	assert(key->module_if->crypto_key_deinit);

	key->module_if->crypto_key_deinit(key);
	accel_crypto_key_free_mem(key);
}

/*
 * This function mitigates a timing side channel which could be caused by using strcmp().
 * Please refer to the chapter "Mitigating Information Leakage Based on Variable Timing" in
 * the article [1] for more details.
 * [1] https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/secure-coding/mitigate-timing-side-channel-crypto-implementation.html
 */
static bool
accel_aes_xts_keys_equal(const char *k1, size_t k1_len, const char *k2, size_t k2_len)
{
	size_t i;
	volatile size_t x = k1_len ^ k2_len;

	for (i = 0; ((i < k1_len) & (i < k2_len)); i++) {
		x |= k1[i] ^ k2[i];
	}

	return x == 0;
}

static const char *g_tweak_modes[] = {
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA] = "SIMPLE_LBA",
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_JOIN_NEG_LBA_WITH_LBA] = "JOIN_NEG_LBA_WITH_LBA",
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_INCR_512_FULL_LBA] = "INCR_512_FULL_LBA",
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_INCR_512_UPPER_LBA] = "INCR_512_UPPER_LBA",
};

static const char *g_ciphers[] = {
	[SPDK_ACCEL_CIPHER_AES_CBC] = "AES_CBC",
	[SPDK_ACCEL_CIPHER_AES_XTS] = "AES_XTS",
};
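/* A minimal usage sketch for spdk_accel_crypto_key_create(), with hypothetical hex key
 * strings (the values are placeholders, not real keys):
 *
 *	struct spdk_accel_crypto_key_create_param param = {
 *		.key_name = "my_xts_key",				// hypothetical name
 *		.cipher = "AES_XTS",
 *		.hex_key = "00112233445566778899aabbccddeeff",	// placeholder
 *		.hex_key2 = "ffeeddccbbaa99887766554433221100",	// placeholder, must differ from hex_key
 *		.tweak_mode = "SIMPLE_LBA",				// optional; this is the default
 *	};
 *	int rc = spdk_accel_crypto_key_create(&param);
 *
 * Keys are passed as hex strings and unhexlified internally; AES_XTS requires two keys of
 * equal size that are not identical (see the checks below). */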
int
spdk_accel_crypto_key_create(const struct spdk_accel_crypto_key_create_param *param)
{
	struct spdk_accel_module_if *module;
	struct spdk_accel_crypto_key *key;
	size_t hex_key_size, hex_key2_size;
	bool found = false;
	size_t i;
	int rc;

	if (!param || !param->hex_key || !param->cipher || !param->key_name) {
		return -EINVAL;
	}

	if (g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module != g_modules_opc[SPDK_ACCEL_OPC_DECRYPT].module) {
		/* hardly ever possible, but let's check and warn the user */
		SPDK_ERRLOG("Different accel modules are used for encryption and decryption\n");
	}
	module = g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module;

	if (!module) {
		SPDK_ERRLOG("No accel module assigned for crypto operations\n");
		return -ENOENT;
	}

	if (!module->crypto_key_init || !module->crypto_supports_cipher) {
		SPDK_ERRLOG("Module %s doesn't support crypto operations\n", module->name);
		return -ENOTSUP;
	}

	key = calloc(1, sizeof(*key));
	if (!key) {
		return -ENOMEM;
	}

	key->param.key_name = strdup(param->key_name);
	if (!key->param.key_name) {
		rc = -ENOMEM;
		goto error;
	}

	for (i = 0; i < SPDK_COUNTOF(g_ciphers); ++i) {
		assert(g_ciphers[i]);

		if (strncmp(param->cipher, g_ciphers[i], strlen(g_ciphers[i])) == 0) {
			key->cipher = i;
			found = true;
			break;
		}
	}

	if (!found) {
		SPDK_ERRLOG("Failed to parse cipher\n");
		rc = -EINVAL;
		goto error;
	}

	key->param.cipher = strdup(param->cipher);
	if (!key->param.cipher) {
		rc = -ENOMEM;
		goto error;
	}

	hex_key_size = strnlen(param->hex_key, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
	if (hex_key_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
		SPDK_ERRLOG("key1 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
		rc = -EINVAL;
		goto error;
	}

	if (hex_key_size == 0) {
		SPDK_ERRLOG("key1 size cannot be 0\n");
		rc = -EINVAL;
		goto error;
	}

	key->param.hex_key = strdup(param->hex_key);
	if (!key->param.hex_key) {
		rc = -ENOMEM;
		goto error;
	}

	key->key_size = hex_key_size / 2;
	key->key = spdk_unhexlify(key->param.hex_key);
	if (!key->key) {
		SPDK_ERRLOG("Failed to unhexlify key1\n");
		rc = -EINVAL;
		goto error;
	}

	if (param->hex_key2) {
		hex_key2_size = strnlen(param->hex_key2, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
		if (hex_key2_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
			SPDK_ERRLOG("key2 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
			rc = -EINVAL;
			goto error;
		}

		if (hex_key2_size == 0) {
			SPDK_ERRLOG("key2 size cannot be 0\n");
			rc = -EINVAL;
			goto error;
		}

		key->param.hex_key2 = strdup(param->hex_key2);
		if (!key->param.hex_key2) {
			rc = -ENOMEM;
			goto error;
		}

		key->key2_size = hex_key2_size / 2;
		key->key2 = spdk_unhexlify(key->param.hex_key2);
		if (!key->key2) {
			SPDK_ERRLOG("Failed to unhexlify key2\n");
			rc = -EINVAL;
			goto error;
		}
	}

	key->tweak_mode = ACCEL_CRYPTO_TWEAK_MODE_DEFAULT;
	if (param->tweak_mode) {
		found = false;

		key->param.tweak_mode = strdup(param->tweak_mode);
		if (!key->param.tweak_mode) {
			rc = -ENOMEM;
			goto error;
		}

		for (i = 0; i < SPDK_COUNTOF(g_tweak_modes); ++i) {
			assert(g_tweak_modes[i]);

			if (strncmp(param->tweak_mode, g_tweak_modes[i], strlen(g_tweak_modes[i])) == 0) {
				key->tweak_mode = i;
				found = true;
				break;
			}
		}

		if (!found) {
			SPDK_ERRLOG("Failed to parse tweak mode\n");
			rc = -EINVAL;
			goto error;
		}
	}

	if ((!module->crypto_supports_tweak_mode && key->tweak_mode != ACCEL_CRYPTO_TWEAK_MODE_DEFAULT) ||
	    (module->crypto_supports_tweak_mode && !module->crypto_supports_tweak_mode(key->tweak_mode))) {
		SPDK_ERRLOG("Module %s doesn't support %s tweak mode\n", module->name,
			    g_tweak_modes[key->tweak_mode]);
		rc = -EINVAL;
		goto error;
	}

	if (!module->crypto_supports_cipher(key->cipher, key->key_size)) {
		SPDK_ERRLOG("Module %s doesn't support %s cipher with %zu key size\n", module->name,
			    g_ciphers[key->cipher], key->key_size);
		rc = -EINVAL;
		goto error;
	}

	if (key->cipher == SPDK_ACCEL_CIPHER_AES_XTS) {
		if (!key->key2) {
			SPDK_ERRLOG("%s key2 is missing\n", g_ciphers[key->cipher]);
			rc = -EINVAL;
			goto error;
		}

		if (key->key_size != key->key2_size) {
			SPDK_ERRLOG("%s key size %zu is not equal to key2 size %zu\n", g_ciphers[key->cipher],
				    key->key_size, key->key2_size);
			rc = -EINVAL;
			goto error;
		}

		if (accel_aes_xts_keys_equal(key->key, key->key_size, key->key2, key->key2_size)) {
			SPDK_ERRLOG("%s identical keys are not secure\n", g_ciphers[key->cipher]);
			rc = -EINVAL;
			goto error;
		}
	}

	if (key->cipher == SPDK_ACCEL_CIPHER_AES_CBC) {
		if (key->key2_size) {
			SPDK_ERRLOG("%s doesn't use key2\n", g_ciphers[key->cipher]);
			rc = -EINVAL;
			goto error;
		}
	}

	key->module_if = module;

	spdk_spin_lock(&g_keyring_spin);
	if (_accel_crypto_key_get(param->key_name)) {
		rc = -EEXIST;
	} else {
		rc = module->crypto_key_init(key);
		if (rc) {
			SPDK_ERRLOG("Module %s failed to initialize crypto key\n", module->name);
		} else {
			TAILQ_INSERT_TAIL(&g_keyring, key, link);
		}
	}
	spdk_spin_unlock(&g_keyring_spin);

	if (rc) {
		goto error;
	}

	return 0;

error:
	accel_crypto_key_free_mem(key);
	return rc;
}
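/* Destroying a key first re-checks membership and unlinks it from the keyring under the
 * spinlock; the module's deinit and the memset/free of the key material then happen outside
 * the lock, which is safe because the key is no longer reachable through the keyring. */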
int
spdk_accel_crypto_key_destroy(struct spdk_accel_crypto_key *key)
{
	if (!key || !key->module_if) {
		return -EINVAL;
	}

	spdk_spin_lock(&g_keyring_spin);
	if (!_accel_crypto_key_get(key->param.key_name)) {
		spdk_spin_unlock(&g_keyring_spin);
		return -ENOENT;
	}
	TAILQ_REMOVE(&g_keyring, key, link);
	spdk_spin_unlock(&g_keyring_spin);

	accel_crypto_key_destroy_unsafe(key);

	return 0;
}

struct spdk_accel_crypto_key *
spdk_accel_crypto_key_get(const char *name)
{
	struct spdk_accel_crypto_key *key;

	spdk_spin_lock(&g_keyring_spin);
	key = _accel_crypto_key_get(name);
	spdk_spin_unlock(&g_keyring_spin);

	return key;
}
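/* Modules typically don't call spdk_accel_module_list_add() directly; they register through a
 * constructor macro declared in spdk/accel_module.h (SPDK_ACCEL_MODULE_REGISTER() at the time
 * of writing), roughly like the following sketch (the example callbacks are hypothetical):
 *
 *	static struct spdk_accel_module_if g_example_module = {
 *		.module_init	 = example_init,
 *		.module_fini	 = example_fini,
 *		.name		 = "example",
 *		.get_ctx_size	 = example_get_ctx_size,
 *		.supports_opcode = example_supports_opcode,
 *		.get_io_channel	 = example_get_io_channel,
 *	};
 *	SPDK_ACCEL_MODULE_REGISTER(example, &g_example_module)
 *
 * Registration keeps the list sorted by ascending priority; see below. */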
/* Helper function called when accel modules register with the framework. */
void
spdk_accel_module_list_add(struct spdk_accel_module_if *accel_module)
{
	struct spdk_accel_module_if *tmp;

	if (_module_find_by_name(accel_module->name)) {
		SPDK_NOTICELOG("Module %s already registered\n", accel_module->name);
		assert(false);
		return;
	}

	TAILQ_FOREACH(tmp, &spdk_accel_module_list, tailq) {
		if (accel_module->priority < tmp->priority) {
			break;
		}
	}

	if (tmp != NULL) {
		TAILQ_INSERT_BEFORE(tmp, accel_module, tailq);
	} else {
		TAILQ_INSERT_TAIL(&spdk_accel_module_list, accel_module, tailq);
	}
}
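/* Each accel channel pre-allocates fixed pools of tasks, sequences, auxiliary task data and
 * accel buffers (sized by g_opts.task_count, sequence_count and buf_count), so the hot path
 * never allocates.  Tasks and sequences are carved out of SPDK_CACHE_LINE_SIZE-aligned slabs:
 * tasks because modules append their per-task context (up to g_max_accel_module_size) behind
 * each spdk_accel_task, and presumably to avoid false sharing between adjacent entries. */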
/* Framework level channel create callback. */
static int
accel_create_channel(void *io_device, void *ctx_buf)
{
	struct accel_io_channel *accel_ch = ctx_buf;
	struct spdk_accel_task *accel_task;
	struct spdk_accel_task_aux_data *accel_task_aux;
	struct spdk_accel_sequence *seq;
	struct accel_buffer *buf;
	size_t task_size_aligned;
	uint8_t *task_mem;
	uint32_t i = 0, j;
	int rc;

	task_size_aligned = SPDK_ALIGN_CEIL(g_max_accel_module_size, SPDK_CACHE_LINE_SIZE);
	accel_ch->task_pool_base = aligned_alloc(SPDK_CACHE_LINE_SIZE,
						 g_opts.task_count * task_size_aligned);
	if (!accel_ch->task_pool_base) {
		return -ENOMEM;
	}
	memset(accel_ch->task_pool_base, 0, g_opts.task_count * task_size_aligned);

	accel_ch->seq_pool_base = aligned_alloc(SPDK_CACHE_LINE_SIZE,
						g_opts.sequence_count * sizeof(struct spdk_accel_sequence));
	if (accel_ch->seq_pool_base == NULL) {
		goto err;
	}
	memset(accel_ch->seq_pool_base, 0, g_opts.sequence_count * sizeof(struct spdk_accel_sequence));

	accel_ch->task_aux_data_base = calloc(g_opts.task_count, sizeof(struct spdk_accel_task_aux_data));
	if (accel_ch->task_aux_data_base == NULL) {
		goto err;
	}

	accel_ch->buf_pool_base = calloc(g_opts.buf_count, sizeof(struct accel_buffer));
	if (accel_ch->buf_pool_base == NULL) {
		goto err;
	}

	STAILQ_INIT(&accel_ch->task_pool);
	SLIST_INIT(&accel_ch->task_aux_data_pool);
	SLIST_INIT(&accel_ch->seq_pool);
	SLIST_INIT(&accel_ch->buf_pool);

	task_mem = accel_ch->task_pool_base;
	for (i = 0; i < g_opts.task_count; i++) {
		accel_task = (struct spdk_accel_task *)task_mem;
		accel_task->aux = NULL;
		STAILQ_INSERT_TAIL(&accel_ch->task_pool, accel_task, link);
		task_mem += task_size_aligned;
		accel_task_aux = &accel_ch->task_aux_data_base[i];
		SLIST_INSERT_HEAD(&accel_ch->task_aux_data_pool, accel_task_aux, link);
	}
	for (i = 0; i < g_opts.sequence_count; i++) {
		seq = &accel_ch->seq_pool_base[i];
		SLIST_INSERT_HEAD(&accel_ch->seq_pool, seq, link);
	}
	for (i = 0; i < g_opts.buf_count; i++) {
		buf = &accel_ch->buf_pool_base[i];
		SLIST_INSERT_HEAD(&accel_ch->buf_pool, buf, link);
	}

	/* Assign modules and get IO channels for each */
	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
		accel_ch->module_ch[i] = g_modules_opc[i].module->get_io_channel();
		/* This can happen if idxd runs out of channels. */
		if (accel_ch->module_ch[i] == NULL) {
			SPDK_ERRLOG("Module %s failed to get io channel\n", g_modules_opc[i].module->name);
			goto err;
		}
	}

	if (g_accel_driver != NULL) {
		accel_ch->driver_channel = g_accel_driver->get_io_channel();
		if (accel_ch->driver_channel == NULL) {
			SPDK_ERRLOG("Failed to get driver's IO channel\n");
			goto err;
		}
	}

	rc = spdk_iobuf_channel_init(&accel_ch->iobuf, "accel", g_opts.small_cache_size,
				     g_opts.large_cache_size);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to initialize iobuf accel channel\n");
		goto err;
	}

	return 0;
err:
	if (accel_ch->driver_channel != NULL) {
		spdk_put_io_channel(accel_ch->driver_channel);
	}
	for (j = 0; j < i; j++) {
		spdk_put_io_channel(accel_ch->module_ch[j]);
	}
	free(accel_ch->task_pool_base);
	free(accel_ch->task_aux_data_base);
	free(accel_ch->seq_pool_base);
	free(accel_ch->buf_pool_base);

	return -ENOMEM;
}

static void
accel_add_stats(struct accel_stats *total, struct accel_stats *stats)
{
	int i;

	total->sequence_executed += stats->sequence_executed;
	total->sequence_failed += stats->sequence_failed;
	total->retry.task += stats->retry.task;
	total->retry.sequence += stats->retry.sequence;
	total->retry.iobuf += stats->retry.iobuf;
	total->retry.bufdesc += stats->retry.bufdesc;
	for (i = 0; i < SPDK_ACCEL_OPC_LAST; ++i) {
		total->operations[i].executed += stats->operations[i].executed;
		total->operations[i].failed += stats->operations[i].failed;
		total->operations[i].num_bytes += stats->operations[i].num_bytes;
	}
}
/* Framework level channel destroy callback. */
static void
accel_destroy_channel(void *io_device, void *ctx_buf)
{
	struct accel_io_channel *accel_ch = ctx_buf;
	int i;

	spdk_iobuf_channel_fini(&accel_ch->iobuf);

	if (accel_ch->driver_channel != NULL) {
		spdk_put_io_channel(accel_ch->driver_channel);
	}

	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
		assert(accel_ch->module_ch[i] != NULL);
		spdk_put_io_channel(accel_ch->module_ch[i]);
		accel_ch->module_ch[i] = NULL;
	}

	/* Update global stats to make sure channel's stats aren't lost after a channel is gone */
	spdk_spin_lock(&g_stats_lock);
	accel_add_stats(&g_stats, &accel_ch->stats);
	spdk_spin_unlock(&g_stats_lock);

	free(accel_ch->task_pool_base);
	free(accel_ch->task_aux_data_base);
	free(accel_ch->seq_pool_base);
	free(accel_ch->buf_pool_base);
}

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(&spdk_accel_module_list);
}

static int
accel_module_initialize(void)
{
	struct spdk_accel_module_if *accel_module, *tmp_module;
	int rc = 0, module_rc;

	TAILQ_FOREACH_SAFE(accel_module, &spdk_accel_module_list, tailq, tmp_module) {
		module_rc = accel_module->module_init();
		if (module_rc) {
			TAILQ_REMOVE(&spdk_accel_module_list, accel_module, tailq);
			if (module_rc == -ENODEV) {
				SPDK_NOTICELOG("No devices for module %s, skipping\n", accel_module->name);
			} else if (!rc) {
				SPDK_ERRLOG("Module %s initialization failed with %d\n", accel_module->name, module_rc);
				rc = module_rc;
			}
			continue;
		}

		SPDK_DEBUGLOG(accel, "Module %s initialized.\n", accel_module->name);
	}

	return rc;
}

static void
accel_module_init_opcode(enum spdk_accel_opcode opcode)
{
	struct accel_module *module = &g_modules_opc[opcode];
	struct spdk_accel_module_if *module_if = module->module;

	if (module_if->get_memory_domains != NULL) {
		module->supports_memory_domains = module_if->get_memory_domains(NULL, 0) > 0;
	}
}

static int
accel_memory_domain_translate(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			      struct spdk_memory_domain *dst_domain,
			      struct spdk_memory_domain_translation_ctx *dst_domain_ctx,
			      void *addr, size_t len, struct spdk_memory_domain_translation_result *result)
{
	struct accel_buffer *buf = src_domain_ctx;

	SPDK_DEBUGLOG(accel, "translate addr %p, len %zu\n", addr, len);

	assert(g_accel_domain == src_domain);
	assert(spdk_memory_domain_get_system_domain() == dst_domain);
	assert(buf->buf == NULL);
	assert(addr == ACCEL_BUFFER_BASE);
	assert(len == buf->len);

	buf->buf = spdk_iobuf_get(&buf->ch->iobuf, buf->len, NULL, NULL);
	if (spdk_unlikely(buf->buf == NULL)) {
		return -ENOMEM;
	}

	result->iov_count = 1;
	result->iov.iov_base = buf->buf;
	result->iov.iov_len = buf->len;
	SPDK_DEBUGLOG(accel, "translated addr %p\n", result->iov.iov_base);
	return 0;
}
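/* accel_memory_domain_translate() and accel_memory_domain_invalidate() form a pair: accel
 * buffers live in g_accel_domain and start out as virtual addresses based at
 * ACCEL_BUFFER_BASE with no backing memory.  When a consumer that doesn't understand this
 * domain needs real memory, translation grabs an iobuf buffer on demand; invalidation returns
 * it to the iobuf pool once the data has been moved out.  This is what allows a sequence to
 * be built before any of its intermediate buffers actually exist. */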
static void
accel_memory_domain_invalidate(struct spdk_memory_domain *domain, void *domain_ctx,
			       struct iovec *iov, uint32_t iovcnt)
{
	struct accel_buffer *buf = domain_ctx;

	SPDK_DEBUGLOG(accel, "invalidate addr %p, len %zu\n", iov[0].iov_base, iov[0].iov_len);

	assert(g_accel_domain == domain);
	assert(iovcnt == 1);
	assert(buf->buf != NULL);
	assert(iov[0].iov_base == buf->buf);
	assert(iov[0].iov_len == buf->len);

	spdk_iobuf_put(&buf->ch->iobuf, buf->buf, buf->len);
	buf->buf = NULL;
}
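/* Framework initialization order below: register the io_device (so channels can be created),
 * create the accel memory domain and hook up its translate/invalidate callbacks, initialize
 * all registered modules (dropping the ones that fail), initialize the platform driver if one
 * was selected via spdk_accel_set_driver(), and only then assign modules to opcodes. */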
int
spdk_accel_initialize(void)
{
	enum spdk_accel_opcode op;
	struct spdk_accel_module_if *accel_module = NULL;
	int rc;

	/*
	 * We need a unique identifier for the accel framework, so use the
	 * spdk_accel_module_list address for this purpose.
	 */
	spdk_io_device_register(&spdk_accel_module_list, accel_create_channel, accel_destroy_channel,
				sizeof(struct accel_io_channel), "accel");

	spdk_spin_init(&g_keyring_spin);
	spdk_spin_init(&g_stats_lock);

	rc = spdk_memory_domain_create(&g_accel_domain, SPDK_DMA_DEVICE_TYPE_ACCEL, NULL,
				       "SPDK_ACCEL_DMA_DEVICE");
	if (rc != 0) {
		SPDK_ERRLOG("Failed to create accel memory domain\n");
		return rc;
	}

	spdk_memory_domain_set_translation(g_accel_domain, accel_memory_domain_translate);
	spdk_memory_domain_set_invalidate(g_accel_domain, accel_memory_domain_invalidate);

	g_modules_started = true;
	rc = accel_module_initialize();
	if (rc) {
		return rc;
	}

	if (g_accel_driver != NULL && g_accel_driver->init != NULL) {
		rc = g_accel_driver->init();
		if (rc != 0) {
			SPDK_ERRLOG("Failed to initialize driver %s: %s\n", g_accel_driver->name,
				    spdk_strerror(-rc));
			return rc;
		}
	}

	/* The module list is ordered by priority, with the highest priority modules being at the
	 * end of the list.  The software module should be somewhere at the beginning of the
	 * list, before all HW modules.
	 * NOTE: all opcodes must be supported by software in the event that no HW modules are
	 * initialized to support the operation.
	 */
	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
			if (accel_module->supports_opcode(op)) {
				g_modules_opc[op].module = accel_module;
				SPDK_DEBUGLOG(accel, "OPC 0x%x now assigned to %s\n", op, accel_module->name);
			}
		}

		if (accel_module->get_ctx_size != NULL) {
			g_max_accel_module_size = spdk_max(g_max_accel_module_size,
							   accel_module->get_ctx_size());
		}
	}

	/* Now let's check for overrides and apply all that exist */
	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
		if (g_modules_opc_override[op] != NULL) {
			accel_module = _module_find_by_name(g_modules_opc_override[op]);
			if (accel_module == NULL) {
				SPDK_ERRLOG("Invalid module name of %s\n", g_modules_opc_override[op]);
				return -EINVAL;
			}
			if (accel_module->supports_opcode(op) == false) {
				SPDK_ERRLOG("Module %s does not support op code %d\n", accel_module->name, op);
				return -EINVAL;
			}
			g_modules_opc[op].module = accel_module;
		}
	}

	if (g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module != g_modules_opc[SPDK_ACCEL_OPC_DECRYPT].module) {
		SPDK_ERRLOG("Different accel modules are assigned to encrypt and decrypt operations\n");
		return -EINVAL;
	}

	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
		assert(g_modules_opc[op].module != NULL);
		accel_module_init_opcode(op);
	}

	rc = spdk_iobuf_register_module("accel");
	if (rc != 0) {
		SPDK_ERRLOG("Failed to register accel iobuf module\n");
		return rc;
	}

	return 0;
}

static void
accel_module_finish_cb(void)
{
	spdk_accel_fini_cb cb_fn = g_fini_cb_fn;

	cb_fn(g_fini_cb_arg);
	g_fini_cb_fn = NULL;
	g_fini_cb_arg = NULL;
}
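/* The helpers below serialize the framework's runtime configuration as a JSON array of RPC
 * method objects ("accel_set_options", "accel_assign_opc", "accel_crypto_key_create", plus
 * whatever each module emits), so that replaying the generated file through the RPC server
 * reproduces the current state.  For example, an overridden opcode comes out roughly as:
 *
 *	{
 *		"method": "accel_assign_opc",
 *		"params": { "opname": "crc32c", "module": "ioat" }
 *	}
 *
 * (the module name shown is illustrative). */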
static void
accel_write_overridden_opc(struct spdk_json_write_ctx *w, const char *opc_str,
			   const char *module_str)
{
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "accel_assign_opc");
	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_string(w, "opname", opc_str);
	spdk_json_write_named_string(w, "module", module_str);
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);
}

static void
__accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
{
	spdk_json_write_named_string(w, "name", key->param.key_name);
	spdk_json_write_named_string(w, "cipher", key->param.cipher);
	spdk_json_write_named_string(w, "key", key->param.hex_key);
	if (key->param.hex_key2) {
		spdk_json_write_named_string(w, "key2", key->param.hex_key2);
	}

	if (key->param.tweak_mode) {
		spdk_json_write_named_string(w, "tweak_mode", key->param.tweak_mode);
	}
}

void
_accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
{
	spdk_json_write_object_begin(w);
	__accel_crypto_key_dump_param(w, key);
	spdk_json_write_object_end(w);
}

static void
_accel_crypto_key_write_config_json(struct spdk_json_write_ctx *w,
				    struct spdk_accel_crypto_key *key)
{
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "accel_crypto_key_create");
	spdk_json_write_named_object_begin(w, "params");
	__accel_crypto_key_dump_param(w, key);
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);
}

static void
accel_write_options(struct spdk_json_write_ctx *w)
{
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "accel_set_options");
	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_uint32(w, "small_cache_size", g_opts.small_cache_size);
	spdk_json_write_named_uint32(w, "large_cache_size", g_opts.large_cache_size);
	spdk_json_write_named_uint32(w, "task_count", g_opts.task_count);
	spdk_json_write_named_uint32(w, "sequence_count", g_opts.sequence_count);
	spdk_json_write_named_uint32(w, "buf_count", g_opts.buf_count);
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);
}

static void
_accel_crypto_keys_write_config_json(struct spdk_json_write_ctx *w, bool full_dump)
{
	struct spdk_accel_crypto_key *key;

	spdk_spin_lock(&g_keyring_spin);
	TAILQ_FOREACH(key, &g_keyring, link) {
		if (full_dump) {
			_accel_crypto_key_write_config_json(w, key);
		} else {
			_accel_crypto_key_dump_param(w, key);
		}
	}
	spdk_spin_unlock(&g_keyring_spin);
}

void
_accel_crypto_keys_dump_param(struct spdk_json_write_ctx *w)
{
	_accel_crypto_keys_write_config_json(w, false);
}

void
spdk_accel_write_config_json(struct spdk_json_write_ctx *w)
{
	struct spdk_accel_module_if *accel_module;
	int i;

	spdk_json_write_array_begin(w);
	accel_write_options(w);

	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		if (accel_module->write_config_json) {
			accel_module->write_config_json(w);
		}
	}
	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
		if (g_modules_opc_override[i]) {
			accel_write_overridden_opc(w, g_opcode_strings[i], g_modules_opc_override[i]);
		}
	}

	_accel_crypto_keys_write_config_json(w, true);

	spdk_json_write_array_end(w);
}
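/* Teardown walks the module list one module at a time: g_accel_module acts as a cursor, and
 * each module's module_fini callback is expected to call spdk_accel_module_finish() again
 * once it's done, which advances the cursor (modules without a module_fini are skipped via
 * the direct recursion below).  The thread-message indirection keeps each fini step on the
 * framework thread and unwinds the stack between modules.  Once the cursor runs off the end
 * of the list, the driver is finalized, the spinlocks and memory domain are destroyed, and
 * the user's completion callback fires. */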
void
spdk_accel_module_finish(void)
{
	if (!g_accel_module) {
		g_accel_module = TAILQ_FIRST(&spdk_accel_module_list);
	} else {
		g_accel_module = TAILQ_NEXT(g_accel_module, tailq);
	}

	if (!g_accel_module) {
		if (g_accel_driver != NULL && g_accel_driver->fini != NULL) {
			g_accel_driver->fini();
		}

		spdk_spin_destroy(&g_keyring_spin);
		spdk_spin_destroy(&g_stats_lock);
		if (g_accel_domain) {
			spdk_memory_domain_destroy(g_accel_domain);
			g_accel_domain = NULL;
		}
		accel_module_finish_cb();
		return;
	}

	if (g_accel_module->module_fini) {
		spdk_thread_send_msg(spdk_get_thread(), g_accel_module->module_fini, NULL);
	} else {
		spdk_accel_module_finish();
	}
}

static void
accel_io_device_unregister_cb(void *io_device)
{
	struct spdk_accel_crypto_key *key, *key_tmp;
	enum spdk_accel_opcode op;

	spdk_spin_lock(&g_keyring_spin);
	TAILQ_FOREACH_SAFE(key, &g_keyring, link, key_tmp) {
		accel_crypto_key_destroy_unsafe(key);
	}
	spdk_spin_unlock(&g_keyring_spin);

	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
		if (g_modules_opc_override[op] != NULL) {
			free(g_modules_opc_override[op]);
			g_modules_opc_override[op] = NULL;
		}
		g_modules_opc[op].module = NULL;
	}

	spdk_accel_module_finish();
}

void
spdk_accel_finish(spdk_accel_fini_cb cb_fn, void *cb_arg)
{
	assert(cb_fn != NULL);

	g_fini_cb_fn = cb_fn;
	g_fini_cb_arg = cb_arg;

	spdk_io_device_unregister(&spdk_accel_module_list, accel_io_device_unregister_cb);
}

static struct spdk_accel_driver *
accel_find_driver(const char *name)
{
	struct spdk_accel_driver *driver;

	TAILQ_FOREACH(driver, &g_accel_drivers, tailq) {
		if (strcmp(driver->name, name) == 0) {
			return driver;
		}
	}

	return NULL;
}

int
spdk_accel_set_driver(const char *name)
{
	struct spdk_accel_driver *driver;

	driver = accel_find_driver(name);
	if (driver == NULL) {
		SPDK_ERRLOG("Couldn't find driver named '%s'\n", name);
		return -ENODEV;
	}

	g_accel_driver = driver;

	return 0;
}

const char *
spdk_accel_get_driver_name(void)
{
	if (!g_accel_driver) {
		return NULL;
	}

	return g_accel_driver->name;
}

void
spdk_accel_driver_register(struct spdk_accel_driver *driver)
{
	if (accel_find_driver(driver->name)) {
		SPDK_ERRLOG("Driver named '%s' has already been registered\n", driver->name);
		assert(0);
		return;
	}

	TAILQ_INSERT_TAIL(&g_accel_drivers, driver, tailq);
}

int
spdk_accel_set_opts(const struct spdk_accel_opts *opts)
{
	if (!opts) {
		SPDK_ERRLOG("opts cannot be NULL\n");
		return -1;
	}

	if (!opts->opts_size) {
		SPDK_ERRLOG("opts_size inside opts cannot be zero\n");
		return -1;
	}

#define SET_FIELD(field) \
	if (offsetof(struct spdk_accel_opts, field) + sizeof(opts->field) <= opts->opts_size) { \
		g_opts.field = opts->field; \
	} \

	SET_FIELD(small_cache_size);
	SET_FIELD(large_cache_size);
	SET_FIELD(task_count);
	SET_FIELD(sequence_count);
	SET_FIELD(buf_count);

	g_opts.opts_size = opts->opts_size;

#undef SET_FIELD

	return 0;
}

void
spdk_accel_get_opts(struct spdk_accel_opts *opts, size_t opts_size)
{
	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return;
	}

	if (!opts_size) {
		SPDK_ERRLOG("opts_size should not be zero\n");
		return;
	}

	opts->opts_size = opts_size;

#define SET_FIELD(field) \
	if (offsetof(struct spdk_accel_opts, field) + sizeof(opts->field) <= opts_size) { \
		opts->field = g_opts.field; \
	} \

	SET_FIELD(small_cache_size);
	SET_FIELD(large_cache_size);
	SET_FIELD(task_count);
	SET_FIELD(sequence_count);
	SET_FIELD(buf_count);

#undef SET_FIELD

	/* Do not remove this statement.  You should always update it when adding a new field,
	 * and do not forget to add the SET_FIELD statement for your new field. */
	SPDK_STATIC_ASSERT(sizeof(struct spdk_accel_opts) == 28, "Incorrect size");
}
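/* Both spdk_accel_set_opts() and spdk_accel_get_opts() use opts_size to stay ABI-compatible:
 * only fields that fit entirely within the caller-declared size are copied, so an application
 * compiled against an older definition of spdk_accel_opts keeps working.  A minimal sketch of
 * the intended calling pattern:
 *
 *	struct spdk_accel_opts opts = { .opts_size = sizeof(opts) };
 *
 *	spdk_accel_get_opts(&opts, sizeof(opts));	// fetch current values
 *	opts.task_count = 4096;				// tune one field
 *	spdk_accel_set_opts(&opts);
 *
 * Calling set_opts before spdk_accel_initialize() is an assumption based on how g_opts is
 * consumed in accel_create_channel(); values changed later would only affect newly created
 * channels. */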
struct accel_get_stats_ctx {
	struct accel_stats stats;
	accel_get_stats_cb cb_fn;
	void *cb_arg;
};

static void
accel_get_channel_stats_done(struct spdk_io_channel_iter *iter, int status)
{
	struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);

	ctx->cb_fn(&ctx->stats, ctx->cb_arg);
	free(ctx);
}

static void
accel_get_channel_stats(struct spdk_io_channel_iter *iter)
{
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(iter);
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);

	accel_add_stats(&ctx->stats, &accel_ch->stats);
	spdk_for_each_channel_continue(iter, 0);
}

int
accel_get_stats(accel_get_stats_cb cb_fn, void *cb_arg)
{
	struct accel_get_stats_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		return -ENOMEM;
	}

	spdk_spin_lock(&g_stats_lock);
	accel_add_stats(&ctx->stats, &g_stats);
	spdk_spin_unlock(&g_stats_lock);

	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	spdk_for_each_channel(&spdk_accel_module_list, accel_get_channel_stats, ctx,
			      accel_get_channel_stats_done);

	return 0;
}

void
spdk_accel_get_opcode_stats(struct spdk_io_channel *ch, enum spdk_accel_opcode opcode,
			    struct spdk_accel_opcode_stats *stats, size_t size)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);

#define FIELD_OK(field) \
	offsetof(struct spdk_accel_opcode_stats, field) + sizeof(stats->field) <= size

#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		stats->field = value; \
	}

	SET_FIELD(executed, accel_ch->stats.operations[opcode].executed);
	SET_FIELD(failed, accel_ch->stats.operations[opcode].failed);
	SET_FIELD(num_bytes, accel_ch->stats.operations[opcode].num_bytes);

#undef FIELD_OK
#undef SET_FIELD
}

uint8_t
spdk_accel_get_buf_align(enum spdk_accel_opcode opcode,
			 const struct spdk_accel_operation_exec_ctx *ctx)
{
	struct spdk_accel_module_if *module = g_modules_opc[opcode].module;
	struct spdk_accel_opcode_info modinfo = {}, drvinfo = {};

	if (g_accel_driver != NULL && g_accel_driver->get_operation_info != NULL) {
		g_accel_driver->get_operation_info(opcode, ctx, &drvinfo);
	}

	if (module->get_operation_info != NULL) {
		module->get_operation_info(opcode, ctx, &modinfo);
	}

	/* If a driver is set, it'll execute most of the operations, while the rest will usually
	 * fall back to accel_sw, which doesn't have any alignment requirements.  However, to be
	 * extra safe, return the max(driver, module) if a driver delegates some operations to a
	 * hardware module. */
	return spdk_max(modinfo.required_alignment, drvinfo.required_alignment);
}
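/* accel_get_stats() above aggregates in two steps: it seeds the context with g_stats
 * (counters folded in from already-destroyed channels) under the stats spinlock, then walks
 * every live channel with spdk_for_each_channel() adding per-channel counters, and finally
 * reports the total from accel_get_channel_stats_done(). */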
struct spdk_accel_module_if *
spdk_accel_get_module(const char *name)
{
	struct spdk_accel_module_if *module;

	TAILQ_FOREACH(module, &spdk_accel_module_list, tailq) {
		if (strcmp(module->name, name) == 0) {
			return module;
		}
	}

	return NULL;
}

int
spdk_accel_get_opc_memory_domains(enum spdk_accel_opcode opcode,
				  struct spdk_memory_domain **domains,
				  int array_size)
{
	assert(opcode < SPDK_ACCEL_OPC_LAST);

	if (g_modules_opc[opcode].module->get_memory_domains) {
		return g_modules_opc[opcode].module->get_memory_domains(domains, array_size);
	}

	return 0;
}

SPDK_LOG_REGISTER_COMPONENT(accel)