/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Intel Corporation.
 * Copyright (c) 2022, 2023 NVIDIA CORPORATION & AFFILIATES.
 * All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/accel_module.h"

#include "accel_internal.h"

#include "spdk/dma.h"
#include "spdk/env.h"
#include "spdk/likely.h"
#include "spdk/log.h"
#include "spdk/thread.h"
#include "spdk/json.h"
#include "spdk/crc32.h"
#include "spdk/util.h"
#include "spdk/hexlify.h"
#include "spdk/string.h"

/* Accelerator Framework: The following provides a top level
 * generic API for the accelerator functions defined here. Modules,
 * such as the one in /module/accel/ioat, supply the implementation
 * with the exception of the pure software implementation contained
 * later in this file.
 */

#define ALIGN_4K		0x1000
#define MAX_TASKS_PER_CHANNEL	0x800
#define ACCEL_SMALL_CACHE_SIZE	128
#define ACCEL_LARGE_CACHE_SIZE	16
/* Set MSB, so we don't return NULL pointers as buffers */
#define ACCEL_BUFFER_BASE	((void *)(1ull << 63))
#define ACCEL_BUFFER_OFFSET_MASK	((uintptr_t)ACCEL_BUFFER_BASE - 1)

#define ACCEL_CRYPTO_TWEAK_MODE_DEFAULT	SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA
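
/* The "virtual" buffer addresses handed out by spdk_accel_get_buf() are based at
 * ACCEL_BUFFER_BASE (MSB set), so they can never be NULL and are easy to tell
 * apart from real pointers; only the low bits carry an offset into the buffer,
 * which may not be allocated yet. A minimal sketch of the encoding (illustrative
 * only, not compiled as part of this file):
 *
 *	void *virt = (char *)ACCEL_BUFFER_BASE + 0x1000;
 *	uint64_t offset = (uintptr_t)virt & ACCEL_BUFFER_OFFSET_MASK;
 *	assert(offset == 0x1000);
 *
 * accel_update_virt_iov() below uses exactly this mask to translate such an
 * address once the actual data buffer has been bound.
 */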

struct accel_module {
	struct spdk_accel_module_if	*module;
	bool				supports_memory_domains;
};

/* Largest context size for all accel modules */
static size_t g_max_accel_module_size = sizeof(struct spdk_accel_task);

static struct spdk_accel_module_if *g_accel_module = NULL;
static spdk_accel_fini_cb g_fini_cb_fn = NULL;
static void *g_fini_cb_arg = NULL;
static bool g_modules_started = false;
static struct spdk_memory_domain *g_accel_domain;

/* Global list of registered accelerator modules */
static TAILQ_HEAD(, spdk_accel_module_if) spdk_accel_module_list =
	TAILQ_HEAD_INITIALIZER(spdk_accel_module_list);

/* Crypto keyring */
static TAILQ_HEAD(, spdk_accel_crypto_key) g_keyring = TAILQ_HEAD_INITIALIZER(g_keyring);
static struct spdk_spinlock g_keyring_spin;

/* Global array mapping capabilities to modules */
static struct accel_module g_modules_opc[SPDK_ACCEL_OPC_LAST] = {};
static char *g_modules_opc_override[SPDK_ACCEL_OPC_LAST] = {};
TAILQ_HEAD(, spdk_accel_driver) g_accel_drivers = TAILQ_HEAD_INITIALIZER(g_accel_drivers);
static struct spdk_accel_driver *g_accel_driver;
static struct spdk_accel_opts g_opts = {
	.small_cache_size = ACCEL_SMALL_CACHE_SIZE,
	.large_cache_size = ACCEL_LARGE_CACHE_SIZE,
	.task_count = MAX_TASKS_PER_CHANNEL,
	.sequence_count = MAX_TASKS_PER_CHANNEL,
	.buf_count = MAX_TASKS_PER_CHANNEL,
};
static struct accel_stats g_stats;
static struct spdk_spinlock g_stats_lock;

static const char *g_opcode_strings[SPDK_ACCEL_OPC_LAST] = {
	"copy", "fill", "dualcast", "compare", "crc32c", "copy_crc32c",
	"compress", "decompress", "encrypt", "decrypt", "xor",
	"dif_verify", "dif_generate", "dif_generate_copy"
};

enum accel_sequence_state {
	ACCEL_SEQUENCE_STATE_INIT,
	ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF,
	ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF,
	ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF,
	ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF,
	ACCEL_SEQUENCE_STATE_PULL_DATA,
	ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA,
	ACCEL_SEQUENCE_STATE_EXEC_TASK,
	ACCEL_SEQUENCE_STATE_AWAIT_TASK,
	ACCEL_SEQUENCE_STATE_COMPLETE_TASK,
	ACCEL_SEQUENCE_STATE_NEXT_TASK,
	ACCEL_SEQUENCE_STATE_PUSH_DATA,
	ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA,
	ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS,
	ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS,
	ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS,
	ACCEL_SEQUENCE_STATE_ERROR,
	ACCEL_SEQUENCE_STATE_MAX,
};

static const char *g_seq_states[]
__attribute__((unused)) = {
	[ACCEL_SEQUENCE_STATE_INIT] = "init",
	[ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF] = "check-virtbuf",
	[ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF] = "await-virtbuf",
	[ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF] = "check-bouncebuf",
	[ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF] = "await-bouncebuf",
	[ACCEL_SEQUENCE_STATE_PULL_DATA] = "pull-data",
	[ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA] = "await-pull-data",
	[ACCEL_SEQUENCE_STATE_EXEC_TASK] = "exec-task",
	[ACCEL_SEQUENCE_STATE_AWAIT_TASK] = "await-task",
	[ACCEL_SEQUENCE_STATE_COMPLETE_TASK] = "complete-task",
	[ACCEL_SEQUENCE_STATE_NEXT_TASK] = "next-task",
	[ACCEL_SEQUENCE_STATE_PUSH_DATA] = "push-data",
	[ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA] = "await-push-data",
	[ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS] = "driver-exec-tasks",
	[ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS] = "driver-await-tasks",
	[ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS] = "driver-complete-tasks",
	[ACCEL_SEQUENCE_STATE_ERROR] = "error",
	[ACCEL_SEQUENCE_STATE_MAX] = "",
};

#define ACCEL_SEQUENCE_STATE_STRING(s) \
	(((s) >= ACCEL_SEQUENCE_STATE_INIT && (s) < ACCEL_SEQUENCE_STATE_MAX) \
	 ? g_seq_states[s] : "unknown")

struct accel_buffer {
	struct spdk_accel_sequence	*seq;
	void				*buf;
	uint64_t			len;
	struct spdk_iobuf_entry		iobuf;
	spdk_accel_sequence_get_buf_cb	cb_fn;
	void				*cb_ctx;
	SLIST_ENTRY(accel_buffer)	link;
};

struct accel_io_channel {
	struct spdk_io_channel			*module_ch[SPDK_ACCEL_OPC_LAST];
	struct spdk_io_channel			*driver_channel;
	void					*task_pool_base;
	struct spdk_accel_sequence		*seq_pool_base;
	struct accel_buffer			*buf_pool_base;
	struct spdk_accel_task_aux_data		*task_aux_data_base;
	STAILQ_HEAD(, spdk_accel_task)		task_pool;
	SLIST_HEAD(, spdk_accel_task_aux_data)	task_aux_data_pool;
	SLIST_HEAD(, spdk_accel_sequence)	seq_pool;
	SLIST_HEAD(, accel_buffer)		buf_pool;
	struct spdk_iobuf_channel		iobuf;
	struct accel_stats			stats;
};

TAILQ_HEAD(accel_sequence_tasks, spdk_accel_task);

struct spdk_accel_sequence {
	struct accel_io_channel			*ch;
	struct accel_sequence_tasks		tasks;
	SLIST_HEAD(, accel_buffer)		bounce_bufs;
	int					status;
	/* state uses enum accel_sequence_state */
	uint8_t					state;
	bool					in_process_sequence;
	spdk_accel_completion_cb		cb_fn;
	void					*cb_arg;
	SLIST_ENTRY(spdk_accel_sequence)	link;
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_accel_sequence) == 64, "invalid size");

#define accel_update_stats(ch, event, v) \
	do { \
		(ch)->stats.event += (v); \
	} while (0)

#define accel_update_task_stats(ch, task, event, v) \
	accel_update_stats(ch, operations[(task)->op_code].event, v)

static inline void accel_sequence_task_cb(void *cb_arg, int status);

static inline void
accel_sequence_set_state(struct spdk_accel_sequence *seq, enum accel_sequence_state state)
{
	SPDK_DEBUGLOG(accel, "seq=%p, setting state: %s -> %s\n", seq,
		      ACCEL_SEQUENCE_STATE_STRING(seq->state), ACCEL_SEQUENCE_STATE_STRING(state));
	assert(seq->state != ACCEL_SEQUENCE_STATE_ERROR || state == ACCEL_SEQUENCE_STATE_ERROR);
	seq->state = state;
}

static void
accel_sequence_set_fail(struct spdk_accel_sequence *seq, int status)
{
	accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
	assert(status != 0);
	seq->status = status;
}

int
spdk_accel_get_opc_module_name(enum spdk_accel_opcode opcode, const char **module_name)
{
	if (opcode >= SPDK_ACCEL_OPC_LAST) {
		/* invalid opcode */
		return -EINVAL;
	}

	if (g_modules_opc[opcode].module) {
		*module_name = g_modules_opc[opcode].module->name;
	} else {
		return -ENOENT;
	}

	return 0;
}

void
_accel_for_each_module(struct module_info *info, _accel_for_each_module_fn fn)
{
	struct spdk_accel_module_if *accel_module;
	enum spdk_accel_opcode opcode;
	int j = 0;

	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		for (opcode = 0; opcode < SPDK_ACCEL_OPC_LAST; opcode++) {
			if (accel_module->supports_opcode(opcode)) {
				info->ops[j] = opcode;
				j++;
			}
		}
		info->name = accel_module->name;
		info->num_ops = j;
		fn(info);
		j = 0;
	}
}

const char *
spdk_accel_get_opcode_name(enum spdk_accel_opcode opcode)
{
	if (opcode < SPDK_ACCEL_OPC_LAST) {
		return g_opcode_strings[opcode];
	}

	return NULL;
}

int
spdk_accel_assign_opc(enum spdk_accel_opcode opcode, const char *name)
{
	char *copy;

	if (g_modules_started == true) {
		/* we don't allow re-assignment once things have started */
		return -EINVAL;
	}

	if (opcode >= SPDK_ACCEL_OPC_LAST) {
		/* invalid opcode */
		return -EINVAL;
	}

	copy = strdup(name);
	if (copy == NULL) {
		return -ENOMEM;
	}

	/* module selection will be validated after the framework starts. */
	free(g_modules_opc_override[opcode]);
	g_modules_opc_override[opcode] = copy;

	return 0;
}
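
/* Usage sketch (illustrative, not compiled as part of this file): an opcode can
 * be pinned to a specific module before the framework starts, e.g. from an RPC
 * handler. "software" is assumed here to be the name of accel's software module;
 * the override is only validated once the modules are initialized:
 *
 *	int rc = spdk_accel_assign_opc(SPDK_ACCEL_OPC_COPY, "software");
 *	if (rc != 0) {
 *		// -EINVAL: framework already started or invalid opcode
 *	}
 */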

void
spdk_accel_task_complete(struct spdk_accel_task *accel_task, int status)
{
	struct accel_io_channel *accel_ch = accel_task->accel_ch;
	spdk_accel_completion_cb cb_fn;
	void *cb_arg;

	accel_update_task_stats(accel_ch, accel_task, executed, 1);
	accel_update_task_stats(accel_ch, accel_task, num_bytes, accel_task->nbytes);
	if (spdk_unlikely(status != 0)) {
		accel_update_task_stats(accel_ch, accel_task, failed, 1);
	}

	if (accel_task->seq) {
		accel_sequence_task_cb(accel_task->seq, status);
		return;
	}

	cb_fn = accel_task->cb_fn;
	cb_arg = accel_task->cb_arg;

	if (accel_task->has_aux) {
		SLIST_INSERT_HEAD(&accel_ch->task_aux_data_pool, accel_task->aux, link);
		accel_task->aux = NULL;
		accel_task->has_aux = false;
	}

	/* Return the accel_task to the pool first, so that it is already available if
	 * the user's callback (cb_fn) recursively allocates another accel_task;
	 * otherwise such recursion could exhaust the task pool.
	 */
	STAILQ_INSERT_HEAD(&accel_ch->task_pool, accel_task, link);

	cb_fn(cb_arg, status);
}

inline static struct spdk_accel_task *
_get_task(struct accel_io_channel *accel_ch, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct spdk_accel_task *accel_task;

	accel_task = STAILQ_FIRST(&accel_ch->task_pool);
	if (spdk_unlikely(accel_task == NULL)) {
		accel_update_stats(accel_ch, retry.task, 1);
		return NULL;
	}

	STAILQ_REMOVE_HEAD(&accel_ch->task_pool, link);
	accel_task->link.stqe_next = NULL;

	accel_task->cb_fn = cb_fn;
	accel_task->cb_arg = cb_arg;
	accel_task->accel_ch = accel_ch;
	accel_task->s.iovs = NULL;
	accel_task->d.iovs = NULL;

	return accel_task;
}

static inline int
accel_submit_task(struct accel_io_channel *accel_ch, struct spdk_accel_task *task)
{
	struct spdk_io_channel *module_ch = accel_ch->module_ch[task->op_code];
	struct spdk_accel_module_if *module = g_modules_opc[task->op_code].module;
	int rc;

	rc = module->submit_tasks(module_ch, task);
	if (spdk_unlikely(rc != 0)) {
		accel_update_task_stats(accel_ch, task, failed, 1);
	}

	return rc;
}

static inline uint64_t
accel_get_iovlen(struct iovec *iovs, uint32_t iovcnt)
{
	uint64_t result = 0;
	uint32_t i;

	for (i = 0; i < iovcnt; ++i) {
		result += iovs[i].iov_len;
	}

	return result;
}

#define ACCEL_TASK_ALLOC_AUX_BUF(task) \
	do { \
		(task)->aux = SLIST_FIRST(&(task)->accel_ch->task_aux_data_pool); \
		if (spdk_unlikely(!(task)->aux)) { \
			SPDK_ERRLOG("Fatal problem, aux data was not allocated\n"); \
			STAILQ_INSERT_HEAD(&(task)->accel_ch->task_pool, (task), link); \
			assert(0); \
			return -ENOMEM; \
		} \
		SLIST_REMOVE_HEAD(&(task)->accel_ch->task_aux_data_pool, link); \
		(task)->has_aux = true; \
	} while (0)

SPDK_LOG_DEPRECATION_REGISTER(accel_flags,
			      "The flags parameter is unused and deprecated",
			      "v24.05", 0);

/* `flags` is an int in the public API, but since it is not used anywhere, we narrowed it
 * down to a uint8_t internally. To prevent possible problems in the future, add a macro
 * which checks that the value of `flags` passed in the API doesn't exceed 1 byte.
 */
#define ACCEL_ASSIGN_FLAGS(task, flags) \
	do { \
		assert(((flags) & (~0xff)) == 0 && "task::flags needs to be extended"); \
		(task)->flags = (uint8_t)(flags); \
		if ((task)->flags) { \
			SPDK_LOG_DEPRECATED(accel_flags); \
		} \
	} while (0)

/* Accel framework public API for copy function */
int
spdk_accel_submit_copy(struct spdk_io_channel *ch, void *dst, void *src,
		       uint64_t nbytes, int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->op_code = SPDK_ACCEL_OPC_COPY;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	ACCEL_ASSIGN_FLAGS(accel_task, flags);

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for dual cast copy function */
int
spdk_accel_submit_dualcast(struct spdk_io_channel *ch, void *dst1,
			   void *dst2, void *src, uint64_t nbytes, int flags,
			   spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	if ((uintptr_t)dst1 & (ALIGN_4K - 1) || (uintptr_t)dst2 & (ALIGN_4K - 1)) {
		SPDK_ERRLOG("Dualcast requires 4K alignment on dst addresses\n");
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d2.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST2];
	accel_task->d.iovs[0].iov_base = dst1;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->d2.iovs[0].iov_base = dst2;
	accel_task->d2.iovs[0].iov_len = nbytes;
	accel_task->d2.iovcnt = 1;
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->op_code = SPDK_ACCEL_OPC_DUALCAST;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	ACCEL_ASSIGN_FLAGS(accel_task, flags);

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for compare function */
int
spdk_accel_submit_compare(struct spdk_io_channel *ch, void *src1,
			  void *src2, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
			  void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->s2.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC2];
	accel_task->s.iovs[0].iov_base = src1;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->s2.iovs[0].iov_base = src2;
	accel_task->s2.iovs[0].iov_len = nbytes;
	accel_task->s2.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->op_code = SPDK_ACCEL_OPC_COMPARE;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->flags = 0;

	return accel_submit_task(accel_ch, accel_task);
}
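
/* Usage sketch (illustrative, not compiled as part of this file): submitting a
 * basic copy from a thread that owns an accel I/O channel, e.g. one obtained via
 * spdk_accel_get_io_channel(). The callback runs on the same thread once the
 * selected module completes the task:
 *
 *	static void
 *	copy_done(void *cb_arg, int status)
 *	{
 *		if (status != 0) {
 *			SPDK_ERRLOG("copy failed: %d\n", status);
 *		}
 *	}
 *
 *	// flags are deprecated and should be passed as 0
 *	rc = spdk_accel_submit_copy(ch, dst, src, nbytes, 0, copy_done, NULL);
 *	if (rc == -ENOMEM) {
 *		// channel task pool exhausted, retry later
 *	}
 */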

/* Accel framework public API for fill function */
int
spdk_accel_submit_fill(struct spdk_io_channel *ch, void *dst,
		       uint8_t fill, uint64_t nbytes, int flags,
		       spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->nbytes = nbytes;
	memset(&accel_task->fill_pattern, fill, sizeof(uint64_t));
	accel_task->op_code = SPDK_ACCEL_OPC_FILL;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	ACCEL_ASSIGN_FLAGS(accel_task, flags);

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for CRC-32C function */
int
spdk_accel_submit_crc32c(struct spdk_io_channel *ch, uint32_t *crc_dst,
			 void *src, uint32_t seed, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
			 void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->op_code = SPDK_ACCEL_OPC_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->flags = 0;

	return accel_submit_task(accel_ch, accel_task);
}
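
/* Usage sketch (illustrative, not compiled as part of this file): computing a
 * CRC-32C over a flat buffer. The seed is the initial CRC value, often ~0u
 * depending on the protocol; the result is stored in *crc_dst before the
 * completion callback fires:
 *
 *	uint32_t crc;
 *
 *	rc = spdk_accel_submit_crc32c(ch, &crc, buf, ~0u, buf_len, crc_done, &crc);
 *
 * The iovec variant below accepts scattered source data, deriving nbytes from
 * the iovec lengths via accel_get_iovlen().
 */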

/* Accel framework public API for chained CRC-32C function */
int
spdk_accel_submit_crc32cv(struct spdk_io_channel *ch, uint32_t *crc_dst,
			  struct iovec *iov, uint32_t iov_cnt, uint32_t seed,
			  spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	if (iov == NULL) {
		SPDK_ERRLOG("iov should not be NULL\n");
		return -EINVAL;
	}

	if (!iov_cnt) {
		SPDK_ERRLOG("iovcnt should not be zero\n");
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		SPDK_ERRLOG("no memory\n");
		assert(0);
		return -ENOMEM;
	}

	accel_task->s.iovs = iov;
	accel_task->s.iovcnt = iov_cnt;
	accel_task->nbytes = accel_get_iovlen(iov, iov_cnt);
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->op_code = SPDK_ACCEL_OPC_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->flags = 0;

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for copy with CRC-32C function */
int
spdk_accel_submit_copy_crc32c(struct spdk_io_channel *ch, void *dst,
			      void *src, uint32_t *crc_dst, uint32_t seed, uint64_t nbytes,
			      int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->op_code = SPDK_ACCEL_OPC_COPY_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	ACCEL_ASSIGN_FLAGS(accel_task, flags);

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for chained copy + CRC-32C function */
int
spdk_accel_submit_copy_crc32cv(struct spdk_io_channel *ch, void *dst,
			       struct iovec *src_iovs, uint32_t iov_cnt, uint32_t *crc_dst,
			       uint32_t seed, int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	uint64_t nbytes;

	if (src_iovs == NULL) {
		SPDK_ERRLOG("iov should not be NULL\n");
		return -EINVAL;
	}

	if (!iov_cnt) {
		SPDK_ERRLOG("iovcnt should not be zero\n");
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		SPDK_ERRLOG("no memory\n");
		assert(0);
		return -ENOMEM;
	}

	nbytes = accel_get_iovlen(src_iovs, iov_cnt);

	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = iov_cnt;
	accel_task->nbytes = nbytes;
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->op_code = SPDK_ACCEL_OPC_COPY_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	ACCEL_ASSIGN_FLAGS(accel_task, flags);

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_compress(struct spdk_io_channel *ch, void *dst, uint64_t nbytes,
			   struct iovec *src_iovs, size_t src_iovcnt, uint32_t *output_size, int flags,
			   spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->output_size = output_size;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->nbytes = nbytes;
	accel_task->op_code = SPDK_ACCEL_OPC_COMPRESS;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	ACCEL_ASSIGN_FLAGS(accel_task, flags);

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_decompress(struct spdk_io_channel *ch, struct iovec *dst_iovs,
			     size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
			     uint32_t *output_size, int flags, spdk_accel_completion_cb cb_fn,
			     void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->output_size = output_size;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	accel_task->op_code = SPDK_ACCEL_OPC_DECOMPRESS;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	ACCEL_ASSIGN_FLAGS(accel_task, flags);

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_encrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  uint64_t iv, uint32_t block_size, int flags,
			  spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->crypto_key = key;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	accel_task->iv = iv;
	accel_task->block_size = block_size;
	accel_task->op_code = SPDK_ACCEL_OPC_ENCRYPT;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	ACCEL_ASSIGN_FLAGS(accel_task, flags);

	return accel_submit_task(accel_ch, accel_task);
}
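
/* Usage sketch (illustrative, not compiled as part of this file): encrypting one
 * logical block in place, assuming `key` was created earlier through the accel
 * crypto key API. With the default tweak mode (see
 * ACCEL_CRYPTO_TWEAK_MODE_DEFAULT), the iv is typically the LBA of the block
 * being encrypted:
 *
 *	struct iovec iov = { .iov_base = block, .iov_len = 512 };
 *
 *	rc = spdk_accel_submit_encrypt(ch, key, &iov, 1, &iov, 1,
 *				       lba, 512, 0, crypt_done, NULL);
 */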

int
spdk_accel_submit_decrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  uint64_t iv, uint32_t block_size, int flags,
			  spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->crypto_key = key;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	accel_task->iv = iv;
	accel_task->block_size = block_size;
	accel_task->op_code = SPDK_ACCEL_OPC_DECRYPT;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	ACCEL_ASSIGN_FLAGS(accel_task, flags);

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_xor(struct spdk_io_channel *ch, void *dst, void **sources, uint32_t nsrcs,
		      uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->nsrcs.srcs = sources;
	accel_task->nsrcs.cnt = nsrcs;
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->op_code = SPDK_ACCEL_OPC_XOR;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;

	return accel_submit_task(accel_ch, accel_task);
}
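
/* Usage sketch (illustrative, not compiled as part of this file): XOR-ing two
 * equally sized source buffers into dst, e.g. for RAID5 parity. Note that
 * `sources` is a plain array of buffer pointers, not iovecs:
 *
 *	void *sources[2] = { data0, data1 };
 *
 *	rc = spdk_accel_submit_xor(ch, parity, sources, 2, strip_size,
 *				   xor_done, NULL);
 */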

int
spdk_accel_submit_dif_verify(struct spdk_io_channel *ch,
			     struct iovec *iovs, size_t iovcnt, uint32_t num_blocks,
			     const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err,
			     spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->s.iovs = iovs;
	accel_task->s.iovcnt = iovcnt;
	accel_task->dif.ctx = ctx;
	accel_task->dif.err = err;
	accel_task->dif.num_blocks = num_blocks;
	accel_task->nbytes = num_blocks * ctx->block_size;
	accel_task->op_code = SPDK_ACCEL_OPC_DIF_VERIFY;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_dif_generate(struct spdk_io_channel *ch,
			       struct iovec *iovs, size_t iovcnt, uint32_t num_blocks,
			       const struct spdk_dif_ctx *ctx,
			       spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->s.iovs = iovs;
	accel_task->s.iovcnt = iovcnt;
	accel_task->dif.ctx = ctx;
	accel_task->dif.num_blocks = num_blocks;
	accel_task->nbytes = num_blocks * ctx->block_size;
	accel_task->op_code = SPDK_ACCEL_OPC_DIF_GENERATE;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_dif_generate_copy(struct spdk_io_channel *ch, struct iovec *dst_iovs,
				    size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
				    uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
				    spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->dif.ctx = ctx;
	accel_task->dif.num_blocks = num_blocks;
	accel_task->nbytes = num_blocks * ctx->block_size;
	accel_task->op_code = SPDK_ACCEL_OPC_DIF_GENERATE_COPY;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

static inline struct accel_buffer *
accel_get_buf(struct accel_io_channel *ch, uint64_t len)
{
	struct accel_buffer *buf;

	buf = SLIST_FIRST(&ch->buf_pool);
	if (spdk_unlikely(buf == NULL)) {
		accel_update_stats(ch, retry.bufdesc, 1);
		return NULL;
	}

	SLIST_REMOVE_HEAD(&ch->buf_pool, link);
	buf->len = len;
	buf->buf = NULL;
	buf->seq = NULL;
	buf->cb_fn = NULL;

	return buf;
}

static inline void
accel_put_buf(struct accel_io_channel *ch, struct accel_buffer *buf)
{
	if (buf->buf != NULL) {
		spdk_iobuf_put(&ch->iobuf, buf->buf, buf->len);
	}

	SLIST_INSERT_HEAD(&ch->buf_pool, buf, link);
}

static inline struct spdk_accel_sequence *
accel_sequence_get(struct accel_io_channel *ch)
{
	struct spdk_accel_sequence *seq;

	seq = SLIST_FIRST(&ch->seq_pool);
	if (spdk_unlikely(seq == NULL)) {
		accel_update_stats(ch, retry.sequence, 1);
		return NULL;
	}

	SLIST_REMOVE_HEAD(&ch->seq_pool, link);

	TAILQ_INIT(&seq->tasks);
	SLIST_INIT(&seq->bounce_bufs);

	seq->ch = ch;
	seq->status = 0;
	seq->state = ACCEL_SEQUENCE_STATE_INIT;
	seq->in_process_sequence = false;

	return seq;
}

static inline void
accel_sequence_put(struct spdk_accel_sequence *seq)
{
	struct accel_io_channel *ch = seq->ch;
	struct accel_buffer *buf;

	while (!SLIST_EMPTY(&seq->bounce_bufs)) {
		buf = SLIST_FIRST(&seq->bounce_bufs);
		SLIST_REMOVE_HEAD(&seq->bounce_bufs, link);
		accel_put_buf(seq->ch, buf);
	}

	assert(TAILQ_EMPTY(&seq->tasks));
	seq->ch = NULL;

	SLIST_INSERT_HEAD(&ch->seq_pool, seq, link);
}

static void accel_sequence_task_cb(void *cb_arg, int status);

static inline struct spdk_accel_task *
accel_sequence_get_task(struct accel_io_channel *ch, struct spdk_accel_sequence *seq,
			spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct spdk_accel_task *task;

	task = _get_task(ch, NULL, NULL);
	if (spdk_unlikely(task == NULL)) {
		return task;
	}

	task->step_cb_fn = cb_fn;
	task->cb_arg = cb_arg;
	task->seq = seq;

	return task;
}

int
spdk_accel_append_copy(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
		       struct iovec *dst_iovs, uint32_t dst_iovcnt,
		       struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
		       struct iovec *src_iovs, uint32_t src_iovcnt,
		       struct spdk_memory_domain *src_domain, void *src_domain_ctx,
		       int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	task->op_code = SPDK_ACCEL_OPC_COPY;
	ACCEL_ASSIGN_FLAGS(task, flags);

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_append_fill(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
		       void *buf, uint64_t len,
		       struct spdk_memory_domain *domain, void *domain_ctx, uint8_t pattern,
		       int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	memset(&task->fill_pattern, pattern, sizeof(uint64_t));

	task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
	if (spdk_unlikely(!task->aux)) {
		SPDK_ERRLOG("Fatal problem, aux data was not allocated\n");
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}
		STAILQ_INSERT_HEAD(&task->accel_ch->task_pool, task, link);
		task->seq = NULL;
		assert(0);
		return -ENOMEM;
	}
	SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
	task->has_aux = true;

	task->d.iovs = &task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
	task->d.iovs[0].iov_base = buf;
	task->d.iovs[0].iov_len = len;
	task->d.iovcnt = 1;
	task->nbytes = len;
	task->src_domain = NULL;
	task->dst_domain = domain;
	task->dst_domain_ctx = domain_ctx;
	task->op_code = SPDK_ACCEL_OPC_FILL;
	ACCEL_ASSIGN_FLAGS(task, flags);

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}
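
/* Usage sketch (illustrative, not compiled as part of this file): building a
 * two-step sequence. Passing *pseq == NULL allocates a new sequence; subsequent
 * appends reuse it. The per-step callbacks fire as each task finishes, while
 * executing the whole chain (e.g. via spdk_accel_sequence_finish(), defined
 * outside this section) is a separate step:
 *
 *	struct spdk_accel_sequence *seq = NULL;
 *
 *	rc = spdk_accel_append_fill(&seq, ch, tmp, len, NULL, NULL, 0xff, 0,
 *				    NULL, NULL);
 *	if (rc == 0) {
 *		rc = spdk_accel_append_copy(&seq, ch, &dst_iov, 1, NULL, NULL,
 *					    &src_iov, 1, NULL, NULL, 0, NULL, NULL);
 *	}
 */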

int
spdk_accel_append_decompress(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
			     struct iovec *dst_iovs, size_t dst_iovcnt,
			     struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			     struct iovec *src_iovs, size_t src_iovcnt,
			     struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			     int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	/* TODO: support output_size for chaining */
	task->output_size = NULL;
	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	task->op_code = SPDK_ACCEL_OPC_DECOMPRESS;
	ACCEL_ASSIGN_FLAGS(task, flags);

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_append_encrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
			  struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			  uint64_t iv, uint32_t block_size, int flags,
			  spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	assert(dst_iovs && dst_iovcnt && src_iovs && src_iovcnt && key && block_size);

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	task->crypto_key = key;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	task->iv = iv;
	task->block_size = block_size;
	task->op_code = SPDK_ACCEL_OPC_ENCRYPT;
	ACCEL_ASSIGN_FLAGS(task, flags);

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}
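
/* Usage sketch (illustrative, not compiled as part of this file): chaining
 * encryption with a following copy. When the sequence is executed, a copy whose
 * source matches the encrypt step's destination can be elided by merging the
 * destination buffers (see accel_task_set_dstbuf() below):
 *
 *	rc = spdk_accel_append_encrypt(&seq, ch, key, &bounce_iov, 1, NULL, NULL,
 *				       &data_iov, 1, NULL, NULL, lba, 512, 0,
 *				       NULL, NULL);
 *	if (rc == 0) {
 *		rc = spdk_accel_append_copy(&seq, ch, &dst_iov, 1, NULL, NULL,
 *					    &bounce_iov, 1, NULL, NULL, 0,
 *					    NULL, NULL);
 *	}
 */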

int
spdk_accel_append_decrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
			  struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			  uint64_t iv, uint32_t block_size, int flags,
			  spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	assert(dst_iovs && dst_iovcnt && src_iovs && src_iovcnt && key && block_size);

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	task->crypto_key = key;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	task->iv = iv;
	task->block_size = block_size;
	task->op_code = SPDK_ACCEL_OPC_DECRYPT;
	ACCEL_ASSIGN_FLAGS(task, flags);

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_append_crc32c(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
			 uint32_t *dst, struct iovec *iovs, uint32_t iovcnt,
			 struct spdk_memory_domain *domain, void *domain_ctx,
			 uint32_t seed, spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	task->s.iovs = iovs;
	task->s.iovcnt = iovcnt;
	task->src_domain = domain;
	task->src_domain_ctx = domain_ctx;
	task->nbytes = accel_get_iovlen(iovs, iovcnt);
	task->crc_dst = dst;
	task->seed = seed;
	task->op_code = SPDK_ACCEL_OPC_CRC32C;
	task->dst_domain = NULL;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_get_buf(struct spdk_io_channel *ch, uint64_t len, void **buf,
		   struct spdk_memory_domain **domain, void **domain_ctx)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct accel_buffer *accel_buf;

	accel_buf = accel_get_buf(accel_ch, len);
	if (spdk_unlikely(accel_buf == NULL)) {
		return -ENOMEM;
	}

	/* We always return the same pointer and identify the buffers through domain_ctx */
	*buf = ACCEL_BUFFER_BASE;
	*domain_ctx = accel_buf;
	*domain = g_accel_domain;

	return 0;
}

void
spdk_accel_put_buf(struct spdk_io_channel *ch, void *buf,
		   struct spdk_memory_domain *domain, void *domain_ctx)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct accel_buffer *accel_buf = domain_ctx;

	assert(domain == g_accel_domain);
	assert(buf == ACCEL_BUFFER_BASE);

	accel_put_buf(accel_ch, accel_buf);
}
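
/* Usage sketch (illustrative, not compiled as part of this file): using an
 * accel-managed scratch buffer inside a sequence. The returned pointer is the
 * opaque ACCEL_BUFFER_BASE address and must only be referenced through the
 * accel memory domain, never dereferenced directly:
 *
 *	void *buf;
 *	struct spdk_memory_domain *domain;
 *	void *domain_ctx;
 *
 *	rc = spdk_accel_get_buf(ch, len, &buf, &domain, &domain_ctx);
 *	if (rc == 0) {
 *		struct iovec iov = { .iov_base = buf, .iov_len = len };
 *
 *		rc = spdk_accel_append_decompress(&seq, ch, &iov, 1, domain,
 *						  domain_ctx, &src_iov, 1, NULL,
 *						  NULL, 0, NULL, NULL);
 *		...
 *		spdk_accel_put_buf(ch, buf, domain, domain_ctx);
 *	}
 */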

static void
accel_sequence_complete_task(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	struct accel_io_channel *ch = seq->ch;
	spdk_accel_step_cb cb_fn;
	void *cb_arg;

	TAILQ_REMOVE(&seq->tasks, task, seq_link);
	cb_fn = task->step_cb_fn;
	cb_arg = task->cb_arg;
	task->seq = NULL;
	if (task->has_aux) {
		SLIST_INSERT_HEAD(&ch->task_aux_data_pool, task->aux, link);
		task->aux = NULL;
		task->has_aux = false;
	}
	STAILQ_INSERT_HEAD(&ch->task_pool, task, link);
	if (cb_fn != NULL) {
		cb_fn(cb_arg);
	}
}

static void
accel_sequence_complete_tasks(struct spdk_accel_sequence *seq)
{
	struct spdk_accel_task *task;

	while (!TAILQ_EMPTY(&seq->tasks)) {
		task = TAILQ_FIRST(&seq->tasks);
		accel_sequence_complete_task(seq, task);
	}
}

static void
accel_sequence_complete(struct spdk_accel_sequence *seq)
{
	SPDK_DEBUGLOG(accel, "Completed sequence: %p with status: %d\n", seq, seq->status);

	accel_update_stats(seq->ch, sequence_executed, 1);
	if (spdk_unlikely(seq->status != 0)) {
		accel_update_stats(seq->ch, sequence_failed, 1);
	}

	/* First notify all users that appended operations to this sequence */
	accel_sequence_complete_tasks(seq);

	/* Then notify the user that finished the sequence */
	seq->cb_fn(seq->cb_arg, seq->status);

	accel_sequence_put(seq);
}

static void
accel_update_virt_iov(struct iovec *diov, struct iovec *siov, struct accel_buffer *accel_buf)
{
	uintptr_t offset;

	offset = (uintptr_t)siov->iov_base & ACCEL_BUFFER_OFFSET_MASK;
	assert(offset < accel_buf->len);

	diov->iov_base = (char *)accel_buf->buf + offset;
	diov->iov_len = siov->iov_len;
}

static void
accel_sequence_set_virtbuf(struct spdk_accel_sequence *seq, struct accel_buffer *buf)
{
	struct spdk_accel_task *task;
	struct iovec *iov;

	/* Now that we've allocated the actual data buffer for this accel_buffer, update all tasks
	 * in a sequence that were using it.
	 */
	TAILQ_FOREACH(task, &seq->tasks, seq_link) {
		if (task->src_domain == g_accel_domain && task->src_domain_ctx == buf) {
			if (!task->has_aux) {
				task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
				assert(task->aux && "Can't allocate aux data structure");
				task->has_aux = true;
				SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
			}

			iov = &task->aux->iovs[SPDK_ACCEL_AXU_IOV_VIRT_SRC];
			assert(task->s.iovcnt == 1);
			accel_update_virt_iov(iov, &task->s.iovs[0], buf);
			task->src_domain = NULL;
			task->s.iovs = iov;
		}
		if (task->dst_domain == g_accel_domain && task->dst_domain_ctx == buf) {
			if (!task->has_aux) {
				task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
				assert(task->aux && "Can't allocate aux data structure");
				task->has_aux = true;
				SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
			}

			iov = &task->aux->iovs[SPDK_ACCEL_AXU_IOV_VIRT_DST];
			assert(task->d.iovcnt == 1);
			accel_update_virt_iov(iov, &task->d.iovs[0], buf);
			task->dst_domain = NULL;
			task->d.iovs = iov;
		}
	}
}

static void accel_process_sequence(struct spdk_accel_sequence *seq);

static void
accel_iobuf_get_virtbuf_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);

	assert(accel_buf->seq != NULL);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
	accel_process_sequence(accel_buf->seq);
}

static bool
accel_sequence_alloc_buf(struct spdk_accel_sequence *seq, struct accel_buffer *buf,
			 spdk_iobuf_get_cb cb_fn)
{
	struct accel_io_channel *ch = seq->ch;

	assert(buf->buf == NULL);
	assert(buf->seq == NULL);

	buf->seq = seq;
	buf->buf = spdk_iobuf_get(&ch->iobuf, buf->len, &buf->iobuf, cb_fn);
	if (buf->buf == NULL) {
		accel_update_stats(ch, retry.iobuf, 1);
		return false;
	}

	return true;
}

static bool
accel_sequence_check_virtbuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	/* If a task doesn't have dst/src (e.g. fill, crc32), its dst/src domain should be set to
	 * NULL */
	if (task->src_domain == g_accel_domain) {
		if (!accel_sequence_alloc_buf(seq, task->src_domain_ctx,
					      accel_iobuf_get_virtbuf_cb)) {
			return false;
		}

		accel_sequence_set_virtbuf(seq, task->src_domain_ctx);
	}

	if (task->dst_domain == g_accel_domain) {
		if (!accel_sequence_alloc_buf(seq, task->dst_domain_ctx,
					      accel_iobuf_get_virtbuf_cb)) {
			return false;
		}

		accel_sequence_set_virtbuf(seq, task->dst_domain_ctx);
	}

	return true;
}

static void
accel_sequence_get_buf_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);

	assert(accel_buf->seq != NULL);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
	accel_buf->cb_fn(accel_buf->seq, accel_buf->cb_ctx);
}

bool
spdk_accel_alloc_sequence_buf(struct spdk_accel_sequence *seq, void *buf,
			      struct spdk_memory_domain *domain, void *domain_ctx,
			      spdk_accel_sequence_get_buf_cb cb_fn, void *cb_ctx)
{
	struct accel_buffer *accel_buf = domain_ctx;

	assert(domain == g_accel_domain);
	accel_buf->cb_fn = cb_fn;
	accel_buf->cb_ctx = cb_ctx;

	if (!accel_sequence_alloc_buf(seq, accel_buf, accel_sequence_get_buf_cb)) {
		return false;
	}

	accel_sequence_set_virtbuf(seq, accel_buf);

	return true;
}

struct spdk_accel_task *
spdk_accel_sequence_first_task(struct spdk_accel_sequence *seq)
{
	return TAILQ_FIRST(&seq->tasks);
}

struct spdk_accel_task *
spdk_accel_sequence_next_task(struct spdk_accel_task *task)
{
	return TAILQ_NEXT(task, seq_link);
}
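
/* Usage sketch (illustrative, not compiled as part of this file): an accel
 * driver walking the tasks of a sequence it was asked to execute. Each finished
 * task is completed via spdk_accel_task_complete(); once the driver has handled
 * as many tasks as it can, it resumes the sequence with
 * spdk_accel_sequence_continue(). driver_execute() is a hypothetical
 * placeholder for the driver's own submission path:
 *
 *	struct spdk_accel_task *task = spdk_accel_sequence_first_task(seq);
 *
 *	while (task != NULL) {
 *		struct spdk_accel_task *next = spdk_accel_sequence_next_task(task);
 *
 *		rc = driver_execute(task);	// hypothetical driver function
 *		spdk_accel_task_complete(task, rc);
 *		task = next;
 *	}
 *	spdk_accel_sequence_continue(seq);
 */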

static inline void
accel_set_bounce_buffer(struct spdk_accel_bounce_buffer *bounce, struct iovec **iovs,
			uint32_t *iovcnt, struct spdk_memory_domain **domain, void **domain_ctx,
			struct accel_buffer *buf)
{
	bounce->orig_iovs = *iovs;
	bounce->orig_iovcnt = *iovcnt;
	bounce->orig_domain = *domain;
	bounce->orig_domain_ctx = *domain_ctx;
	bounce->iov.iov_base = buf->buf;
	bounce->iov.iov_len = buf->len;

	*iovs = &bounce->iov;
	*iovcnt = 1;
	*domain = NULL;
}

static void
accel_iobuf_get_src_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct spdk_accel_task *task;
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	task = TAILQ_FIRST(&accel_buf->seq->tasks);
	assert(task != NULL);

	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
	assert(task->aux);
	assert(task->has_aux);
	accel_set_bounce_buffer(&task->aux->bounce.s, &task->s.iovs, &task->s.iovcnt, &task->src_domain,
				&task->src_domain_ctx, accel_buf);
	accel_process_sequence(accel_buf->seq);
}

static void
accel_iobuf_get_dst_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct spdk_accel_task *task;
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	task = TAILQ_FIRST(&accel_buf->seq->tasks);
	assert(task != NULL);

	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
	assert(task->aux);
	assert(task->has_aux);
	accel_set_bounce_buffer(&task->aux->bounce.d, &task->d.iovs, &task->d.iovcnt, &task->dst_domain,
				&task->dst_domain_ctx, accel_buf);
	accel_process_sequence(accel_buf->seq);
}

static int
accel_sequence_check_bouncebuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	struct accel_buffer *buf;

	if (task->src_domain != NULL) {
		/* By the time we're here, accel buffers should have been allocated */
		assert(task->src_domain != g_accel_domain);

		if (!task->has_aux) {
			task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
			if (spdk_unlikely(!task->aux)) {
				SPDK_ERRLOG("Can't allocate aux data structure\n");
				assert(0);
				return -EAGAIN;
			}
			task->has_aux = true;
			SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
		}
		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->s.iovs, task->s.iovcnt));
		if (buf == NULL) {
			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
			return -ENOMEM;
		}

		SLIST_INSERT_HEAD(&seq->bounce_bufs, buf, link);
		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_src_bounce_cb)) {
			return -EAGAIN;
		}

		accel_set_bounce_buffer(&task->aux->bounce.s, &task->s.iovs, &task->s.iovcnt,
					&task->src_domain, &task->src_domain_ctx, buf);
	}

	if (task->dst_domain != NULL) {
		/* By the time we're here, accel buffers should have been allocated */
		assert(task->dst_domain != g_accel_domain);

		if (!task->has_aux) {
			task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
			if (spdk_unlikely(!task->aux)) {
				SPDK_ERRLOG("Can't allocate aux data structure\n");
				assert(0);
				return -EAGAIN;
			}
			task->has_aux = true;
			SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
		}
		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->d.iovs, task->d.iovcnt));
		if (buf == NULL) {
			/* The src buffer will be released when a sequence is completed */
			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
			return -ENOMEM;
		}

		SLIST_INSERT_HEAD(&seq->bounce_bufs, buf, link);
		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_dst_bounce_cb)) {
			return -EAGAIN;
		}

		accel_set_bounce_buffer(&task->aux->bounce.d, &task->d.iovs, &task->d.iovcnt,
					&task->dst_domain, &task->dst_domain_ctx, buf);
	}

	return 0;
}

static void
accel_task_pull_data_cb(void *ctx, int status)
{
	struct spdk_accel_sequence *seq = ctx;

	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
	if (spdk_likely(status == 0)) {
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
	} else {
		accel_sequence_set_fail(seq, status);
	}

	accel_process_sequence(seq);
}

static void
accel_task_pull_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	int rc;

	assert(task->has_aux);
	assert(task->aux);
	assert(task->aux->bounce.s.orig_iovs != NULL);
	assert(task->aux->bounce.s.orig_domain != NULL);
	assert(task->aux->bounce.s.orig_domain != g_accel_domain);
	assert(!g_modules_opc[task->op_code].supports_memory_domains);

	rc = spdk_memory_domain_pull_data(task->aux->bounce.s.orig_domain,
					  task->aux->bounce.s.orig_domain_ctx,
					  task->aux->bounce.s.orig_iovs, task->aux->bounce.s.orig_iovcnt,
					  task->s.iovs, task->s.iovcnt,
					  accel_task_pull_data_cb, seq);
	if (spdk_unlikely(rc != 0)) {
		SPDK_ERRLOG("Failed to pull data from memory domain: %s, rc: %d\n",
			    spdk_memory_domain_get_dma_device_id(task->aux->bounce.s.orig_domain), rc);
		accel_sequence_set_fail(seq, rc);
	}
}

static void
accel_task_push_data_cb(void *ctx, int status)
{
	struct spdk_accel_sequence *seq = ctx;

	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
	if (spdk_likely(status == 0)) {
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
	} else {
		accel_sequence_set_fail(seq, status);
	}

	accel_process_sequence(seq);
}

static void
accel_task_push_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	int rc;

	assert(task->has_aux);
	assert(task->aux);
	assert(task->aux->bounce.d.orig_iovs != NULL);
	assert(task->aux->bounce.d.orig_domain != NULL);
	assert(task->aux->bounce.d.orig_domain != g_accel_domain);
	assert(!g_modules_opc[task->op_code].supports_memory_domains);

	rc = spdk_memory_domain_push_data(task->aux->bounce.d.orig_domain,
					  task->aux->bounce.d.orig_domain_ctx,
					  task->aux->bounce.d.orig_iovs, task->aux->bounce.d.orig_iovcnt,
					  task->d.iovs, task->d.iovcnt,
					  accel_task_push_data_cb, seq);
	if (spdk_unlikely(rc != 0)) {
		SPDK_ERRLOG("Failed to push data to memory domain: %s, rc: %d\n",
			    spdk_memory_domain_get_dma_device_id(task->aux->bounce.d.orig_domain), rc);
		accel_sequence_set_fail(seq, rc);
	}
}
static void
accel_process_sequence(struct spdk_accel_sequence *seq)
{
	struct accel_io_channel *accel_ch = seq->ch;
	struct spdk_accel_task *task;
	enum accel_sequence_state state;
	int rc;

	/* Prevent recursive calls to this function */
	if (spdk_unlikely(seq->in_process_sequence)) {
		return;
	}
	seq->in_process_sequence = true;

	task = TAILQ_FIRST(&seq->tasks);
	do {
		state = seq->state;
		switch (state) {
		case ACCEL_SEQUENCE_STATE_INIT:
			if (g_accel_driver != NULL) {
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS);
				break;
			}
		/* Fall through */
		case ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
			if (!accel_sequence_check_virtbuf(seq, task)) {
				/* We couldn't allocate a buffer, wait until one is available */
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
		/* Fall through */
		case ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF:
			/* If a module supports memory domains, we don't need to allocate bounce
			 * buffers */
			if (g_modules_opc[task->op_code].supports_memory_domains) {
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
			rc = accel_sequence_check_bouncebuf(seq, task);
			if (spdk_unlikely(rc != 0)) {
				/* We couldn't allocate a buffer, wait until one is available */
				if (rc == -EAGAIN) {
					break;
				}
				accel_sequence_set_fail(seq, rc);
				break;
			}
			if (task->has_aux && task->s.iovs == &task->aux->bounce.s.iov) {
				assert(task->aux->bounce.s.orig_iovs);
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PULL_DATA);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
		/* Fall through */
		case ACCEL_SEQUENCE_STATE_EXEC_TASK:
			SPDK_DEBUGLOG(accel, "Executing %s operation, sequence: %p\n",
				      g_opcode_strings[task->op_code], seq);

			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_TASK);
			rc = accel_submit_task(accel_ch, task);
			if (spdk_unlikely(rc != 0)) {
				SPDK_ERRLOG("Failed to submit %s operation, sequence: %p\n",
					    g_opcode_strings[task->op_code], seq);
				accel_sequence_set_fail(seq, rc);
			}
			break;
		case ACCEL_SEQUENCE_STATE_PULL_DATA:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
			accel_task_pull_data(seq, task);
			break;
		case ACCEL_SEQUENCE_STATE_COMPLETE_TASK:
			if (task->has_aux && task->d.iovs == &task->aux->bounce.d.iov) {
				assert(task->aux->bounce.d.orig_iovs);
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PUSH_DATA);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
			break;
		case ACCEL_SEQUENCE_STATE_PUSH_DATA:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
			accel_task_push_data(seq, task);
			break;
		case ACCEL_SEQUENCE_STATE_NEXT_TASK:
			accel_sequence_complete_task(seq, task);
			/* Check if there are any remaining tasks */
			task = TAILQ_FIRST(&seq->tasks);
			if (task == NULL) {
				/* Immediately return here to make sure we don't touch the sequence
				 * after it's completed */
				accel_sequence_complete(seq);
				return;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_INIT);
			break;
		case ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS:
			assert(!TAILQ_EMPTY(&seq->tasks));

			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS);
			rc = g_accel_driver->execute_sequence(accel_ch->driver_channel, seq);
			if (spdk_unlikely(rc != 0)) {
				SPDK_ERRLOG("Failed to execute sequence: %p using driver: %s\n",
					    seq, g_accel_driver->name);
				accel_sequence_set_fail(seq, rc);
			}
			break;
		case ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS:
			/* Get the task again, as the driver might have completed some tasks
			 * synchronously */
			task = TAILQ_FIRST(&seq->tasks);
			if (task == NULL) {
				/* Immediately return here to make sure we don't touch the sequence
				 * after it's completed */
				accel_sequence_complete(seq);
				return;
			}
			/* We don't want to execute the next task through the driver, so we
			 * explicitly omit the INIT state here */
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
			break;
		case ACCEL_SEQUENCE_STATE_ERROR:
			/* Immediately return here to make sure we don't touch the sequence
			 * after it's completed */
			assert(seq->status != 0);
			accel_sequence_complete(seq);
			return;
		case ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF:
		case ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF:
		case ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA:
		case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
		case ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA:
		case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS:
			break;
		default:
			assert(0 && "bad state");
			break;
		}
	} while (seq->state != state);

	seq->in_process_sequence = false;
}
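/*
 * Completion callback shared by both execution paths.  In the module path the
 * sequence retains ownership of the task and simply advances the state
 * machine; in the driver path the task is retired immediately, and the
 * sequence does not move until the driver calls
 * spdk_accel_sequence_continue() below.
 */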
static void
accel_sequence_task_cb(void *cb_arg, int status)
{
	struct spdk_accel_sequence *seq = cb_arg;
	struct spdk_accel_task *task = TAILQ_FIRST(&seq->tasks);

	switch (seq->state) {
	case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_COMPLETE_TASK);
		if (spdk_unlikely(status != 0)) {
			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p\n",
				    g_opcode_strings[task->op_code], seq);
			accel_sequence_set_fail(seq, status);
		}

		accel_process_sequence(seq);
		break;
	case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS:
		assert(g_accel_driver != NULL);
		/* Immediately remove the task from the outstanding list to make sure the next call
		 * to spdk_accel_sequence_first_task() doesn't return it */
		accel_sequence_complete_task(seq, task);
		if (spdk_unlikely(status != 0)) {
			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p through "
				    "driver: %s\n", g_opcode_strings[task->op_code], seq,
				    g_accel_driver->name);
			/* Update status without using accel_sequence_set_fail() to avoid changing
			 * seq's state to ERROR until driver calls spdk_accel_sequence_continue() */
			seq->status = status;
		}
		break;
	default:
		assert(0 && "bad state");
		break;
	}
}

void
spdk_accel_sequence_continue(struct spdk_accel_sequence *seq)
{
	assert(g_accel_driver != NULL);
	assert(seq->state == ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS);

	if (spdk_likely(seq->status == 0)) {
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS);
	} else {
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
	}

	accel_process_sequence(seq);
}

static bool
accel_compare_iovs(struct iovec *iova, uint32_t iovacnt, struct iovec *iovb, uint32_t iovbcnt)
{
	/* For now, just do a dumb check that the iovecs arrays are exactly the same */
	if (iovacnt != iovbcnt) {
		return false;
	}

	return memcmp(iova, iovb, sizeof(*iova) * iovacnt) == 0;
}
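/*
 * The helpers below implement copy elision: when a task's destination buffer
 * is only ever read by a following copy, the copy can be dropped and the task
 * made to write directly into the copy's destination.
 * accel_task_set_dstbuf() rewrites a single task's buffers (recursively for
 * crc32c, which consumes its source rather than producing a destination),
 * while accel_sequence_merge_tasks() decides which neighboring pairs qualify.
 */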
static bool
accel_task_set_dstbuf(struct spdk_accel_task *task, struct spdk_accel_task *next)
{
	struct spdk_accel_task *prev;

	switch (task->op_code) {
	case SPDK_ACCEL_OPC_DECOMPRESS:
	case SPDK_ACCEL_OPC_FILL:
	case SPDK_ACCEL_OPC_ENCRYPT:
	case SPDK_ACCEL_OPC_DECRYPT:
		if (task->dst_domain != next->src_domain) {
			return false;
		}
		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
					next->s.iovs, next->s.iovcnt)) {
			return false;
		}
		task->d.iovs = next->d.iovs;
		task->d.iovcnt = next->d.iovcnt;
		task->dst_domain = next->dst_domain;
		task->dst_domain_ctx = next->dst_domain_ctx;
		break;
	case SPDK_ACCEL_OPC_CRC32C:
		/* crc32 is special, because it doesn't have a dst buffer */
		if (task->src_domain != next->src_domain) {
			return false;
		}
		if (!accel_compare_iovs(task->s.iovs, task->s.iovcnt,
					next->s.iovs, next->s.iovcnt)) {
			return false;
		}
		/* We can only change crc32's buffer if we can change previous task's buffer */
		prev = TAILQ_PREV(task, accel_sequence_tasks, seq_link);
		if (prev == NULL) {
			return false;
		}
		if (!accel_task_set_dstbuf(prev, next)) {
			return false;
		}
		task->s.iovs = next->d.iovs;
		task->s.iovcnt = next->d.iovcnt;
		task->src_domain = next->dst_domain;
		task->src_domain_ctx = next->dst_domain_ctx;
		break;
	default:
		return false;
	}

	return true;
}

static void
accel_sequence_merge_tasks(struct spdk_accel_sequence *seq, struct spdk_accel_task *task,
			   struct spdk_accel_task **next_task)
{
	struct spdk_accel_task *next = *next_task;

	switch (task->op_code) {
	case SPDK_ACCEL_OPC_COPY:
		/* We only allow changing src of operations that actually have a src, e.g. we never
		 * do it for fill.  Theoretically, it is possible, but we'd have to be careful to
		 * change the src of the operation after fill (which in turn could also be a fill).
		 * So, for the sake of simplicity, skip this type of operation for now.
		 */
		if (next->op_code != SPDK_ACCEL_OPC_DECOMPRESS &&
		    next->op_code != SPDK_ACCEL_OPC_COPY &&
		    next->op_code != SPDK_ACCEL_OPC_ENCRYPT &&
		    next->op_code != SPDK_ACCEL_OPC_DECRYPT &&
		    next->op_code != SPDK_ACCEL_OPC_CRC32C) {
			break;
		}
		if (task->dst_domain != next->src_domain) {
			break;
		}
		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
					next->s.iovs, next->s.iovcnt)) {
			break;
		}
		next->s.iovs = task->s.iovs;
		next->s.iovcnt = task->s.iovcnt;
		next->src_domain = task->src_domain;
		next->src_domain_ctx = task->src_domain_ctx;
		accel_sequence_complete_task(seq, task);
		break;
	case SPDK_ACCEL_OPC_DECOMPRESS:
	case SPDK_ACCEL_OPC_FILL:
	case SPDK_ACCEL_OPC_ENCRYPT:
	case SPDK_ACCEL_OPC_DECRYPT:
	case SPDK_ACCEL_OPC_CRC32C:
		/* We can only merge tasks when one of them is a copy */
		if (next->op_code != SPDK_ACCEL_OPC_COPY) {
			break;
		}
		if (!accel_task_set_dstbuf(task, next)) {
			break;
		}
		/* We're removing next_task from the tasks queue, so we need to update its pointer,
		 * so that the TAILQ_FOREACH_SAFE() loop below works correctly */
		*next_task = TAILQ_NEXT(next, seq_link);
		accel_sequence_complete_task(seq, next);
		break;
	default:
		assert(0 && "bad opcode");
		break;
	}
}

void
spdk_accel_sequence_finish(struct spdk_accel_sequence *seq,
			   spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct spdk_accel_task *task, *next;

	/* Try to remove any copy operations if possible */
	TAILQ_FOREACH_SAFE(task, &seq->tasks, seq_link, next) {
		if (next == NULL) {
			break;
		}
		accel_sequence_merge_tasks(seq, task, &next);
	}

	seq->cb_fn = cb_fn;
	seq->cb_arg = cb_arg;

	accel_process_sequence(seq);
}
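/*
 * A typical caller flow, as an illustrative sketch only (the append functions
 * are part of the public accel API, but the buffers, callback and argument
 * lists here are hypothetical and abbreviated):
 *
 *	struct spdk_accel_sequence *seq = NULL;
 *
 *	spdk_accel_append_copy(&seq, ch, dst_iovs, ..., src_iovs, ..., NULL, NULL);
 *	spdk_accel_append_crc32c(&seq, ch, ...);
 *	spdk_accel_sequence_finish(seq, finish_cb, cb_arg);
 *
 * spdk_accel_sequence_reverse() below flips the task order, which is useful
 * when operations were appended in "write order" but must execute in "read
 * order"; spdk_accel_sequence_abort() completes all tasks without executing
 * them and returns the sequence to the pool.
 */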
void
spdk_accel_sequence_reverse(struct spdk_accel_sequence *seq)
{
	struct accel_sequence_tasks tasks = TAILQ_HEAD_INITIALIZER(tasks);
	struct spdk_accel_task *task;

	TAILQ_SWAP(&tasks, &seq->tasks, spdk_accel_task, seq_link);

	while (!TAILQ_EMPTY(&tasks)) {
		task = TAILQ_FIRST(&tasks);
		TAILQ_REMOVE(&tasks, task, seq_link);
		TAILQ_INSERT_HEAD(&seq->tasks, task, seq_link);
	}
}

void
spdk_accel_sequence_abort(struct spdk_accel_sequence *seq)
{
	if (seq == NULL) {
		return;
	}

	accel_sequence_complete_tasks(seq);
	accel_sequence_put(seq);
}

struct spdk_memory_domain *
spdk_accel_get_memory_domain(void)
{
	return g_accel_domain;
}

static struct spdk_accel_module_if *
_module_find_by_name(const char *name)
{
	struct spdk_accel_module_if *accel_module = NULL;

	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		if (strcmp(name, accel_module->name) == 0) {
			break;
		}
	}

	return accel_module;
}

static inline struct spdk_accel_crypto_key *
_accel_crypto_key_get(const char *name)
{
	struct spdk_accel_crypto_key *key;

	assert(spdk_spin_held(&g_keyring_spin));

	TAILQ_FOREACH(key, &g_keyring, link) {
		if (strcmp(name, key->param.key_name) == 0) {
			return key;
		}
	}

	return NULL;
}

static void
accel_crypto_key_free_mem(struct spdk_accel_crypto_key *key)
{
	if (key->param.hex_key) {
		spdk_memset_s(key->param.hex_key, key->key_size * 2, 0, key->key_size * 2);
		free(key->param.hex_key);
	}
	if (key->param.hex_key2) {
		spdk_memset_s(key->param.hex_key2, key->key2_size * 2, 0, key->key2_size * 2);
		free(key->param.hex_key2);
	}
	free(key->param.tweak_mode);
	free(key->param.key_name);
	free(key->param.cipher);
	if (key->key) {
		spdk_memset_s(key->key, key->key_size, 0, key->key_size);
		free(key->key);
	}
	if (key->key2) {
		spdk_memset_s(key->key2, key->key2_size, 0, key->key2_size);
		free(key->key2);
	}
	free(key);
}

static void
accel_crypto_key_destroy_unsafe(struct spdk_accel_crypto_key *key)
{
	assert(key->module_if);
	assert(key->module_if->crypto_key_deinit);

	key->module_if->crypto_key_deinit(key);
	accel_crypto_key_free_mem(key);
}

/*
 * This function mitigates a timing side channel which could be caused by using strcmp().
 * Please refer to the chapter "Mitigating Information Leakage Based on Variable Timing" in
 * the article [1] for more details.
 * [1] https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/secure-coding/mitigate-timing-side-channel-crypto-implementation.html
 */
static bool
accel_aes_xts_keys_equal(const char *k1, size_t k1_len, const char *k2, size_t k2_len)
{
	size_t i;
	volatile size_t x = k1_len ^ k2_len;

	for (i = 0; ((i < k1_len) & (i < k2_len)); i++) {
		x |= k1[i] ^ k2[i];
	}

	return x == 0;
}

static const char *g_tweak_modes[] = {
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA] = "SIMPLE_LBA",
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_JOIN_NEG_LBA_WITH_LBA] = "JOIN_NEG_LBA_WITH_LBA",
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_INCR_512_FULL_LBA] = "INCR_512_FULL_LBA",
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_INCR_512_UPPER_LBA] = "INCR_512_UPPER_LBA",
};

static const char *g_ciphers[] = {
	[SPDK_ACCEL_CIPHER_AES_CBC] = "AES_CBC",
	[SPDK_ACCEL_CIPHER_AES_XTS] = "AES_XTS",
};
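/*
 * Key creation validates the parameters below and hands the key material to
 * the module assigned to SPDK_ACCEL_OPC_ENCRYPT.  A minimal caller sketch
 * (all field values are hypothetical; the hex strings must decode to a key
 * size the module accepts):
 *
 *	struct spdk_accel_crypto_key_create_param param = {
 *		.cipher = "AES_XTS",
 *		.hex_key = "00112233445566778899aabbccddeeff",
 *		.hex_key2 = "ffeeddccbbaa99887766554433221100",
 *		.key_name = "key0",
 *	};
 *
 *	rc = spdk_accel_crypto_key_create(&param);
 *
 * In practice the same parameters usually arrive via the
 * accel_crypto_key_create RPC rather than a direct call.
 */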
int
spdk_accel_crypto_key_create(const struct spdk_accel_crypto_key_create_param *param)
{
	struct spdk_accel_module_if *module;
	struct spdk_accel_crypto_key *key;
	size_t hex_key_size, hex_key2_size;
	bool found = false;
	size_t i;
	int rc;

	if (!param || !param->hex_key || !param->cipher || !param->key_name) {
		return -EINVAL;
	}

	if (g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module != g_modules_opc[SPDK_ACCEL_OPC_DECRYPT].module) {
		/* hardly ever possible, but let's check and warn the user */
		SPDK_ERRLOG("Different accel modules are used for encryption and decryption\n");
	}
	module = g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module;

	if (!module) {
		SPDK_ERRLOG("No accel module found assigned for crypto operation\n");
		return -ENOENT;
	}

	if (!module->crypto_key_init || !module->crypto_supports_cipher) {
		SPDK_ERRLOG("Module %s doesn't support crypto operations\n", module->name);
		return -ENOTSUP;
	}

	key = calloc(1, sizeof(*key));
	if (!key) {
		return -ENOMEM;
	}

	key->param.key_name = strdup(param->key_name);
	if (!key->param.key_name) {
		rc = -ENOMEM;
		goto error;
	}

	for (i = 0; i < SPDK_COUNTOF(g_ciphers); ++i) {
		assert(g_ciphers[i]);

		if (strncmp(param->cipher, g_ciphers[i], strlen(g_ciphers[i])) == 0) {
			key->cipher = i;
			found = true;
			break;
		}
	}

	if (!found) {
		SPDK_ERRLOG("Failed to parse cipher\n");
		rc = -EINVAL;
		goto error;
	}

	key->param.cipher = strdup(param->cipher);
	if (!key->param.cipher) {
		rc = -ENOMEM;
		goto error;
	}

	hex_key_size = strnlen(param->hex_key, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
	if (hex_key_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
		SPDK_ERRLOG("key1 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
		rc = -EINVAL;
		goto error;
	}

	if (hex_key_size == 0) {
		SPDK_ERRLOG("key1 size cannot be 0\n");
		rc = -EINVAL;
		goto error;
	}

	key->param.hex_key = strdup(param->hex_key);
	if (!key->param.hex_key) {
		rc = -ENOMEM;
		goto error;
	}

	key->key_size = hex_key_size / 2;
	key->key = spdk_unhexlify(key->param.hex_key);
	if (!key->key) {
		SPDK_ERRLOG("Failed to unhexlify key1\n");
		rc = -EINVAL;
		goto error;
	}

	if (param->hex_key2) {
		hex_key2_size = strnlen(param->hex_key2, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
		if (hex_key2_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
			SPDK_ERRLOG("key2 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
			rc = -EINVAL;
			goto error;
		}

		if (hex_key2_size == 0) {
			SPDK_ERRLOG("key2 size cannot be 0\n");
			rc = -EINVAL;
			goto error;
		}

		key->param.hex_key2 = strdup(param->hex_key2);
		if (!key->param.hex_key2) {
			rc = -ENOMEM;
			goto error;
		}

		key->key2_size = hex_key2_size / 2;
		key->key2 = spdk_unhexlify(key->param.hex_key2);
		if (!key->key2) {
			SPDK_ERRLOG("Failed to unhexlify key2\n");
			rc = -EINVAL;
			goto error;
		}
	}

	key->tweak_mode = ACCEL_CRYPTO_TWEAK_MODE_DEFAULT;
	if (param->tweak_mode) {
		found = false;

		key->param.tweak_mode = strdup(param->tweak_mode);
		if (!key->param.tweak_mode) {
			rc = -ENOMEM;
			goto error;
		}

		for (i = 0; i < SPDK_COUNTOF(g_tweak_modes); ++i) {
			assert(g_tweak_modes[i]);

			if (strncmp(param->tweak_mode, g_tweak_modes[i], strlen(g_tweak_modes[i])) == 0) {
				key->tweak_mode = i;
				found = true;
				break;
			}
		}

		if (!found) {
			SPDK_ERRLOG("Failed to parse tweak mode\n");
			rc = -EINVAL;
			goto error;
		}
	}
	if ((!module->crypto_supports_tweak_mode && key->tweak_mode != ACCEL_CRYPTO_TWEAK_MODE_DEFAULT) ||
	    (module->crypto_supports_tweak_mode && !module->crypto_supports_tweak_mode(key->tweak_mode))) {
		SPDK_ERRLOG("Module %s doesn't support %s tweak mode\n", module->name,
			    g_tweak_modes[key->tweak_mode]);
		rc = -EINVAL;
		goto error;
	}

	if (!module->crypto_supports_cipher(key->cipher, key->key_size)) {
		SPDK_ERRLOG("Module %s doesn't support %s cipher with %zu key size\n", module->name,
			    g_ciphers[key->cipher], key->key_size);
		rc = -EINVAL;
		goto error;
	}

	if (key->cipher == SPDK_ACCEL_CIPHER_AES_XTS) {
		if (!key->key2) {
			SPDK_ERRLOG("%s key2 is missing\n", g_ciphers[key->cipher]);
			rc = -EINVAL;
			goto error;
		}

		if (key->key_size != key->key2_size) {
			SPDK_ERRLOG("%s key size %zu is not equal to key2 size %zu\n", g_ciphers[key->cipher],
				    key->key_size, key->key2_size);
			rc = -EINVAL;
			goto error;
		}

		if (accel_aes_xts_keys_equal(key->key, key->key_size, key->key2, key->key2_size)) {
			SPDK_ERRLOG("%s identical keys are not secure\n", g_ciphers[key->cipher]);
			rc = -EINVAL;
			goto error;
		}
	}

	if (key->cipher == SPDK_ACCEL_CIPHER_AES_CBC) {
		if (key->key2_size) {
			SPDK_ERRLOG("%s doesn't use key2\n", g_ciphers[key->cipher]);
			rc = -EINVAL;
			goto error;
		}
	}

	key->module_if = module;

	spdk_spin_lock(&g_keyring_spin);
	if (_accel_crypto_key_get(param->key_name)) {
		rc = -EEXIST;
	} else {
		rc = module->crypto_key_init(key);
		if (rc) {
			SPDK_ERRLOG("Module %s failed to initialize crypto key\n", module->name);
		} else {
			TAILQ_INSERT_TAIL(&g_keyring, key, link);
		}
	}
	spdk_spin_unlock(&g_keyring_spin);

	if (rc) {
		goto error;
	}

	return 0;

error:
	accel_crypto_key_free_mem(key);
	return rc;
}

int
spdk_accel_crypto_key_destroy(struct spdk_accel_crypto_key *key)
{
	if (!key || !key->module_if) {
		return -EINVAL;
	}

	spdk_spin_lock(&g_keyring_spin);
	if (!_accel_crypto_key_get(key->param.key_name)) {
		spdk_spin_unlock(&g_keyring_spin);
		return -ENOENT;
	}
	TAILQ_REMOVE(&g_keyring, key, link);
	spdk_spin_unlock(&g_keyring_spin);

	accel_crypto_key_destroy_unsafe(key);

	return 0;
}

struct spdk_accel_crypto_key *
spdk_accel_crypto_key_get(const char *name)
{
	struct spdk_accel_crypto_key *key;

	spdk_spin_lock(&g_keyring_spin);
	key = _accel_crypto_key_get(name);
	spdk_spin_unlock(&g_keyring_spin);

	return key;
}

/* Helper function when accel modules register with the framework. */
void
spdk_accel_module_list_add(struct spdk_accel_module_if *accel_module)
{
	struct spdk_accel_module_if *tmp;

	if (_module_find_by_name(accel_module->name)) {
		SPDK_NOTICELOG("Module %s already registered\n", accel_module->name);
		assert(false);
		return;
	}

	/* Keep the list sorted by ascending priority: insert before the first
	 * existing module with a higher priority value. */
	TAILQ_FOREACH(tmp, &spdk_accel_module_list, tailq) {
		if (accel_module->priority < tmp->priority) {
			break;
		}
	}

	if (tmp != NULL) {
		TAILQ_INSERT_BEFORE(tmp, accel_module, tailq);
	} else {
		TAILQ_INSERT_TAIL(&spdk_accel_module_list, accel_module, tailq);
	}
}
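/*
 * Per-channel resources are preallocated according to g_opts (task_count,
 * sequence_count, buf_count; configurable via the accel_set_options RPC).
 * With the defaults, each channel gets MAX_TASKS_PER_CHANNEL (0x800) tasks
 * and aux structures, carved out of a single cache-line-aligned allocation
 * per pool.
 */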
/* Framework level channel create callback. */
static int
accel_create_channel(void *io_device, void *ctx_buf)
{
	struct accel_io_channel *accel_ch = ctx_buf;
	struct spdk_accel_task *accel_task;
	struct spdk_accel_task_aux_data *accel_task_aux;
	struct spdk_accel_sequence *seq;
	struct accel_buffer *buf;
	size_t task_size_aligned;
	uint8_t *task_mem;
	uint32_t i = 0, j;
	int rc;

	task_size_aligned = SPDK_ALIGN_CEIL(g_max_accel_module_size, SPDK_CACHE_LINE_SIZE);
	accel_ch->task_pool_base = aligned_alloc(SPDK_CACHE_LINE_SIZE,
						 g_opts.task_count * task_size_aligned);
	if (!accel_ch->task_pool_base) {
		return -ENOMEM;
	}
	memset(accel_ch->task_pool_base, 0, g_opts.task_count * task_size_aligned);

	accel_ch->seq_pool_base = aligned_alloc(SPDK_CACHE_LINE_SIZE,
						g_opts.sequence_count * sizeof(struct spdk_accel_sequence));
	if (accel_ch->seq_pool_base == NULL) {
		goto err;
	}
	memset(accel_ch->seq_pool_base, 0, g_opts.sequence_count * sizeof(struct spdk_accel_sequence));

	accel_ch->task_aux_data_base = calloc(g_opts.task_count, sizeof(struct spdk_accel_task_aux_data));
	if (accel_ch->task_aux_data_base == NULL) {
		goto err;
	}

	accel_ch->buf_pool_base = calloc(g_opts.buf_count, sizeof(struct accel_buffer));
	if (accel_ch->buf_pool_base == NULL) {
		goto err;
	}

	STAILQ_INIT(&accel_ch->task_pool);
	SLIST_INIT(&accel_ch->task_aux_data_pool);
	SLIST_INIT(&accel_ch->seq_pool);
	SLIST_INIT(&accel_ch->buf_pool);

	task_mem = accel_ch->task_pool_base;
	for (i = 0; i < g_opts.task_count; i++) {
		accel_task = (struct spdk_accel_task *)task_mem;
		accel_task->aux = NULL;
		STAILQ_INSERT_TAIL(&accel_ch->task_pool, accel_task, link);
		task_mem += task_size_aligned;
		accel_task_aux = &accel_ch->task_aux_data_base[i];
		SLIST_INSERT_HEAD(&accel_ch->task_aux_data_pool, accel_task_aux, link);
	}
	for (i = 0; i < g_opts.sequence_count; i++) {
		seq = &accel_ch->seq_pool_base[i];
		SLIST_INSERT_HEAD(&accel_ch->seq_pool, seq, link);
	}
	for (i = 0; i < g_opts.buf_count; i++) {
		buf = &accel_ch->buf_pool_base[i];
		SLIST_INSERT_HEAD(&accel_ch->buf_pool, buf, link);
	}

	/* Assign modules and get IO channels for each */
	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
		accel_ch->module_ch[i] = g_modules_opc[i].module->get_io_channel();
		/* This can happen if idxd runs out of channels. */
		if (accel_ch->module_ch[i] == NULL) {
			SPDK_ERRLOG("Module %s failed to get io channel\n", g_modules_opc[i].module->name);
			goto err;
		}
	}

	if (g_accel_driver != NULL) {
		accel_ch->driver_channel = g_accel_driver->get_io_channel();
		if (accel_ch->driver_channel == NULL) {
			SPDK_ERRLOG("Failed to get driver's IO channel\n");
			goto err;
		}
	}

	rc = spdk_iobuf_channel_init(&accel_ch->iobuf, "accel", g_opts.small_cache_size,
				     g_opts.large_cache_size);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to initialize iobuf accel channel\n");
		goto err;
	}

	return 0;
err:
	if (accel_ch->driver_channel != NULL) {
		spdk_put_io_channel(accel_ch->driver_channel);
	}
	/* i counts the module channels acquired so far; release only those. */
	for (j = 0; j < i; j++) {
		spdk_put_io_channel(accel_ch->module_ch[j]);
	}
	free(accel_ch->task_pool_base);
	free(accel_ch->task_aux_data_base);
	free(accel_ch->seq_pool_base);
	free(accel_ch->buf_pool_base);

	return -ENOMEM;
}
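/*
 * Stats are accumulated per channel without locking and only folded into the
 * global counters under g_stats_lock when a channel is destroyed
 * (accel_destroy_channel()) or when accel_get_stats() aggregates across all
 * channels.
 */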
static void
accel_add_stats(struct accel_stats *total, struct accel_stats *stats)
{
	int i;

	total->sequence_executed += stats->sequence_executed;
	total->sequence_failed += stats->sequence_failed;
	total->retry.task += stats->retry.task;
	total->retry.sequence += stats->retry.sequence;
	total->retry.iobuf += stats->retry.iobuf;
	total->retry.bufdesc += stats->retry.bufdesc;
	for (i = 0; i < SPDK_ACCEL_OPC_LAST; ++i) {
		total->operations[i].executed += stats->operations[i].executed;
		total->operations[i].failed += stats->operations[i].failed;
		total->operations[i].num_bytes += stats->operations[i].num_bytes;
	}
}
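/*
 * Teardown mirrors accel_create_channel(): the iobuf channel is finalized
 * first, then the driver and per-opcode module channels are released, and the
 * channel's stats are folded into g_stats before the pools are freed.
 */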
/* Framework level channel destroy callback. */
static void
accel_destroy_channel(void *io_device, void *ctx_buf)
{
	struct accel_io_channel *accel_ch = ctx_buf;
	int i;

	spdk_iobuf_channel_fini(&accel_ch->iobuf);

	if (accel_ch->driver_channel != NULL) {
		spdk_put_io_channel(accel_ch->driver_channel);
	}

	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
		assert(accel_ch->module_ch[i] != NULL);
		spdk_put_io_channel(accel_ch->module_ch[i]);
		accel_ch->module_ch[i] = NULL;
	}

	/* Update global stats to make sure channel's stats aren't lost after a channel is gone */
	spdk_spin_lock(&g_stats_lock);
	accel_add_stats(&g_stats, &accel_ch->stats);
	spdk_spin_unlock(&g_stats_lock);

	free(accel_ch->task_pool_base);
	free(accel_ch->task_aux_data_base);
	free(accel_ch->seq_pool_base);
	free(accel_ch->buf_pool_base);
}

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(&spdk_accel_module_list);
}

static int
accel_module_initialize(void)
{
	struct spdk_accel_module_if *accel_module, *tmp_module;
	int rc = 0, module_rc;

	TAILQ_FOREACH_SAFE(accel_module, &spdk_accel_module_list, tailq, tmp_module) {
		module_rc = accel_module->module_init();
		if (module_rc) {
			SPDK_ERRLOG("Module %s initialization failed with %d\n", accel_module->name, module_rc);
			TAILQ_REMOVE(&spdk_accel_module_list, accel_module, tailq);
			if (!rc) {
				rc = module_rc;
			}
			/* Don't report a failed module as initialized */
			continue;
		}

		SPDK_DEBUGLOG(accel, "Module %s initialized.\n", accel_module->name);
	}

	return rc;
}

static void
accel_module_init_opcode(enum spdk_accel_opcode opcode)
{
	struct accel_module *module = &g_modules_opc[opcode];
	struct spdk_accel_module_if *module_if = module->module;

	if (module_if->get_memory_domains != NULL) {
		module->supports_memory_domains = module_if->get_memory_domains(NULL, 0) > 0;
	}
}
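/*
 * A module registers itself at load time.  A typical skeleton, as an
 * illustrative sketch only (the callbacks named here are hypothetical):
 *
 *	static struct spdk_accel_module_if g_my_module = {
 *		.module_init		= my_init,
 *		.module_fini		= my_fini,
 *		.name			= "my_module",
 *		.get_ctx_size		= my_get_ctx_size,
 *		.supports_opcode	= my_supports_opcode,
 *		.get_io_channel		= my_get_io_channel,
 *	};
 *	SPDK_ACCEL_MODULE_REGISTER(my, &g_my_module)
 *
 * spdk_accel_initialize() below then assigns each opcode to the
 * highest-priority registered module that supports it.
 */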
int
spdk_accel_initialize(void)
{
	enum spdk_accel_opcode op;
	struct spdk_accel_module_if *accel_module = NULL;
	int rc;

	/*
	 * We need a unique identifier for the accel framework, so use the
	 * spdk_accel_module_list address for this purpose.
	 */
	spdk_io_device_register(&spdk_accel_module_list, accel_create_channel, accel_destroy_channel,
				sizeof(struct accel_io_channel), "accel");

	spdk_spin_init(&g_keyring_spin);
	spdk_spin_init(&g_stats_lock);

	rc = spdk_memory_domain_create(&g_accel_domain, SPDK_DMA_DEVICE_TYPE_ACCEL, NULL,
				       "SPDK_ACCEL_DMA_DEVICE");
	if (rc != 0) {
		SPDK_ERRLOG("Failed to create accel memory domain\n");
		return rc;
	}

	g_modules_started = true;
	rc = accel_module_initialize();
	if (rc) {
		return rc;
	}

	if (g_accel_driver != NULL && g_accel_driver->init != NULL) {
		rc = g_accel_driver->init();
		if (rc != 0) {
			SPDK_ERRLOG("Failed to initialize driver %s: %s\n", g_accel_driver->name,
				    spdk_strerror(-rc));
			return rc;
		}
	}

	/* The module list is ordered by priority, with the highest priority modules being at the
	 * end of the list.  The software module should be somewhere at the beginning of the list,
	 * before all HW modules.
	 * NOTE: all opcodes must be supported by software in the event that no HW modules are
	 * initialized to support the operation.
	 */
	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
			if (accel_module->supports_opcode(op)) {
				g_modules_opc[op].module = accel_module;
				SPDK_DEBUGLOG(accel, "OPC 0x%x now assigned to %s\n", op, accel_module->name);
			}
		}

		if (accel_module->get_ctx_size != NULL) {
			g_max_accel_module_size = spdk_max(g_max_accel_module_size,
							   accel_module->get_ctx_size());
		}
	}

	/* Now let's check for overrides and apply all that exist */
	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
		if (g_modules_opc_override[op] != NULL) {
			accel_module = _module_find_by_name(g_modules_opc_override[op]);
			if (accel_module == NULL) {
				SPDK_ERRLOG("Invalid module name of %s\n", g_modules_opc_override[op]);
				return -EINVAL;
			}
			if (accel_module->supports_opcode(op) == false) {
				SPDK_ERRLOG("Module %s does not support op code %d\n", accel_module->name, op);
				return -EINVAL;
			}
			g_modules_opc[op].module = accel_module;
		}
	}

	if (g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module != g_modules_opc[SPDK_ACCEL_OPC_DECRYPT].module) {
		SPDK_ERRLOG("Different accel modules are assigned to encrypt and decrypt operations\n");
		return -EINVAL;
	}

	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
		assert(g_modules_opc[op].module != NULL);
		accel_module_init_opcode(op);
	}

	rc = spdk_iobuf_register_module("accel");
	if (rc != 0) {
		SPDK_ERRLOG("Failed to register accel iobuf module\n");
		return rc;
	}

	return 0;
}

static void
accel_module_finish_cb(void)
{
	spdk_accel_fini_cb cb_fn = g_fini_cb_fn;

	cb_fn(g_fini_cb_arg);
	g_fini_cb_fn = NULL;
	g_fini_cb_arg = NULL;
}

static void
accel_write_overridden_opc(struct spdk_json_write_ctx *w, const char *opc_str,
			   const char *module_str)
{
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "accel_assign_opc");
	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_string(w, "opname", opc_str);
	spdk_json_write_named_string(w, "module", module_str);
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);
}

static void
__accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
{
	spdk_json_write_named_string(w, "name", key->param.key_name);
	spdk_json_write_named_string(w, "cipher", key->param.cipher);
	spdk_json_write_named_string(w, "key", key->param.hex_key);
	if (key->param.hex_key2) {
		spdk_json_write_named_string(w, "key2", key->param.hex_key2);
	}

	if (key->param.tweak_mode) {
		spdk_json_write_named_string(w, "tweak_mode", key->param.tweak_mode);
	}
}

void
_accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
{
	spdk_json_write_object_begin(w);
	__accel_crypto_key_dump_param(w, key);
	spdk_json_write_object_end(w);
}

static void
_accel_crypto_key_write_config_json(struct spdk_json_write_ctx *w,
				    struct spdk_accel_crypto_key *key)
{
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "accel_crypto_key_create");
	spdk_json_write_named_object_begin(w, "params");
	__accel_crypto_key_dump_param(w, key);
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);
}
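/*
 * For reference, the writers above emit config entries shaped like this
 * (values are illustrative):
 *
 *	{
 *	  "method": "accel_crypto_key_create",
 *	  "params": {
 *	    "name": "key0",
 *	    "cipher": "AES_XTS",
 *	    "key": "00112233...",
 *	    "key2": "ffeeddcc..."
 *	  }
 *	}
 */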
static void
accel_write_options(struct spdk_json_write_ctx *w)
{
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "accel_set_options");
	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_uint32(w, "small_cache_size", g_opts.small_cache_size);
	spdk_json_write_named_uint32(w, "large_cache_size", g_opts.large_cache_size);
	spdk_json_write_named_uint32(w, "task_count", g_opts.task_count);
	spdk_json_write_named_uint32(w, "sequence_count", g_opts.sequence_count);
	spdk_json_write_named_uint32(w, "buf_count", g_opts.buf_count);
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);
}

static void
_accel_crypto_keys_write_config_json(struct spdk_json_write_ctx *w, bool full_dump)
{
	struct spdk_accel_crypto_key *key;

	spdk_spin_lock(&g_keyring_spin);
	TAILQ_FOREACH(key, &g_keyring, link) {
		if (full_dump) {
			_accel_crypto_key_write_config_json(w, key);
		} else {
			_accel_crypto_key_dump_param(w, key);
		}
	}
	spdk_spin_unlock(&g_keyring_spin);
}

void
_accel_crypto_keys_dump_param(struct spdk_json_write_ctx *w)
{
	_accel_crypto_keys_write_config_json(w, false);
}

void
spdk_accel_write_config_json(struct spdk_json_write_ctx *w)
{
	struct spdk_accel_module_if *accel_module;
	int i;

	spdk_json_write_array_begin(w);
	accel_write_options(w);

	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		if (accel_module->write_config_json) {
			accel_module->write_config_json(w);
		}
	}
	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
		if (g_modules_opc_override[i]) {
			accel_write_overridden_opc(w, g_opcode_strings[i], g_modules_opc_override[i]);
		}
	}

	_accel_crypto_keys_write_config_json(w, true);

	spdk_json_write_array_end(w);
}

void
spdk_accel_module_finish(void)
{
	if (!g_accel_module) {
		g_accel_module = TAILQ_FIRST(&spdk_accel_module_list);
	} else {
		g_accel_module = TAILQ_NEXT(g_accel_module, tailq);
	}

	if (!g_accel_module) {
		if (g_accel_driver != NULL && g_accel_driver->fini != NULL) {
			g_accel_driver->fini();
		}

		spdk_spin_destroy(&g_keyring_spin);
		spdk_spin_destroy(&g_stats_lock);
		if (g_accel_domain) {
			spdk_memory_domain_destroy(g_accel_domain);
			g_accel_domain = NULL;
		}
		accel_module_finish_cb();
		return;
	}

	if (g_accel_module->module_fini) {
		spdk_thread_send_msg(spdk_get_thread(), g_accel_module->module_fini, NULL);
	} else {
		spdk_accel_module_finish();
	}
}

static void
accel_io_device_unregister_cb(void *io_device)
{
	struct spdk_accel_crypto_key *key, *key_tmp;
	enum spdk_accel_opcode op;

	spdk_spin_lock(&g_keyring_spin);
	TAILQ_FOREACH_SAFE(key, &g_keyring, link, key_tmp) {
		accel_crypto_key_destroy_unsafe(key);
	}
	spdk_spin_unlock(&g_keyring_spin);

	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
		if (g_modules_opc_override[op] != NULL) {
			free(g_modules_opc_override[op]);
			g_modules_opc_override[op] = NULL;
		}
		g_modules_opc[op].module = NULL;
	}

	spdk_accel_module_finish();
}
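/*
 * Shutdown flow: spdk_accel_finish() unregisters the io_device; the
 * unregister callback above releases the crypto keys and opcode assignments;
 * spdk_accel_module_finish() then walks the module list one module at a time
 * (each module_fini dispatched as a thread message) before destroying the
 * memory domain and invoking the caller's fini callback.
 */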
void
spdk_accel_finish(spdk_accel_fini_cb cb_fn, void *cb_arg)
{
	assert(cb_fn != NULL);

	g_fini_cb_fn = cb_fn;
	g_fini_cb_arg = cb_arg;

	spdk_io_device_unregister(&spdk_accel_module_list, accel_io_device_unregister_cb);
}

static struct spdk_accel_driver *
accel_find_driver(const char *name)
{
	struct spdk_accel_driver *driver;

	TAILQ_FOREACH(driver, &g_accel_drivers, tailq) {
		if (strcmp(driver->name, name) == 0) {
			return driver;
		}
	}

	return NULL;
}

int
spdk_accel_set_driver(const char *name)
{
	struct spdk_accel_driver *driver;

	driver = accel_find_driver(name);
	if (driver == NULL) {
		SPDK_ERRLOG("Couldn't find driver named '%s'\n", name);
		return -ENODEV;
	}

	g_accel_driver = driver;

	return 0;
}

void
spdk_accel_driver_register(struct spdk_accel_driver *driver)
{
	if (accel_find_driver(driver->name)) {
		SPDK_ERRLOG("Driver named '%s' has already been registered\n", driver->name);
		assert(0);
		return;
	}

	TAILQ_INSERT_TAIL(&g_accel_drivers, driver, tailq);
}

int
spdk_accel_set_opts(const struct spdk_accel_opts *opts)
{
	if (opts->size > sizeof(*opts)) {
		return -EINVAL;
	}

	memcpy(&g_opts, opts, opts->size);

	return 0;
}

void
spdk_accel_get_opts(struct spdk_accel_opts *opts)
{
	size_t size = opts->size;

	assert(size <= sizeof(*opts));

	memcpy(opts, &g_opts, spdk_min(sizeof(*opts), size));
	opts->size = size;
}

struct accel_get_stats_ctx {
	struct accel_stats stats;
	accel_get_stats_cb cb_fn;
	void *cb_arg;
};

static void
accel_get_channel_stats_done(struct spdk_io_channel_iter *iter, int status)
{
	struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);

	ctx->cb_fn(&ctx->stats, ctx->cb_arg);
	free(ctx);
}

static void
accel_get_channel_stats(struct spdk_io_channel_iter *iter)
{
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(iter);
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);

	accel_add_stats(&ctx->stats, &accel_ch->stats);
	spdk_for_each_channel_continue(iter, 0);
}

int
accel_get_stats(accel_get_stats_cb cb_fn, void *cb_arg)
{
	struct accel_get_stats_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		return -ENOMEM;
	}

	spdk_spin_lock(&g_stats_lock);
	accel_add_stats(&ctx->stats, &g_stats);
	spdk_spin_unlock(&g_stats_lock);

	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	spdk_for_each_channel(&spdk_accel_module_list, accel_get_channel_stats, ctx,
			      accel_get_channel_stats_done);

	return 0;
}
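/*
 * spdk_accel_get_opcode_stats() below uses the FIELD_OK/SET_FIELD pattern so
 * that callers built against an older, smaller struct spdk_accel_opcode_stats
 * still get every field that fits within the size they pass in; fields beyond
 * 'size' are simply not written.
 */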
void
spdk_accel_get_opcode_stats(struct spdk_io_channel *ch, enum spdk_accel_opcode opcode,
			    struct spdk_accel_opcode_stats *stats, size_t size)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);

#define FIELD_OK(field) \
	offsetof(struct spdk_accel_opcode_stats, field) + sizeof(stats->field) <= size

#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		stats->field = value; \
	}

	SET_FIELD(executed, accel_ch->stats.operations[opcode].executed);
	SET_FIELD(failed, accel_ch->stats.operations[opcode].failed);
	SET_FIELD(num_bytes, accel_ch->stats.operations[opcode].num_bytes);

#undef FIELD_OK
#undef SET_FIELD
}

uint8_t
spdk_accel_get_buf_align(enum spdk_accel_opcode opcode,
			 const struct spdk_accel_operation_exec_ctx *ctx)
{
	struct spdk_accel_module_if *module = g_modules_opc[opcode].module;
	struct spdk_accel_opcode_info modinfo = {}, drvinfo = {};

	if (g_accel_driver != NULL && g_accel_driver->get_operation_info != NULL) {
		g_accel_driver->get_operation_info(opcode, ctx, &drvinfo);
	}

	if (module->get_operation_info != NULL) {
		module->get_operation_info(opcode, ctx, &modinfo);
	}

	/* If a driver is set, it'll execute most of the operations, while the rest will usually
	 * fall back to accel_sw, which doesn't have any alignment requirements.  However, to be
	 * extra safe, return the max(driver, module) if a driver delegates some operations to a
	 * hardware module. */
	return spdk_max(modinfo.required_alignment, drvinfo.required_alignment);
}

struct spdk_accel_module_if *
spdk_accel_get_module(const char *name)
{
	struct spdk_accel_module_if *module;

	TAILQ_FOREACH(module, &spdk_accel_module_list, tailq) {
		if (strcmp(module->name, name) == 0) {
			return module;
		}
	}

	return NULL;
}

SPDK_LOG_REGISTER_COMPONENT(accel)