/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Intel Corporation.
 * Copyright (c) 2022, 2023 NVIDIA CORPORATION & AFFILIATES.
 * All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/accel_module.h"

#include "accel_internal.h"

#include "spdk/dma.h"
#include "spdk/env.h"
#include "spdk/likely.h"
#include "spdk/log.h"
#include "spdk/thread.h"
#include "spdk/json.h"
#include "spdk/crc32.h"
#include "spdk/util.h"
#include "spdk/hexlify.h"
#include "spdk/string.h"

/* Accelerator Framework: The following provides a top level
 * generic API for the accelerator functions defined here. Modules,
 * such as the one in /module/accel/ioat, supply the implementation
 * with the exception of the pure software implementation contained
 * later in this file.
 */

#define ALIGN_4K			0x1000
#define ACCEL_TASKS_PER_CHANNEL		2048
#define ACCEL_SMALL_CACHE_SIZE		128
#define ACCEL_LARGE_CACHE_SIZE		16
/* Set MSB, so we don't return NULL pointers as buffers */
#define ACCEL_BUFFER_BASE		((void *)(1ull << 63))
#define ACCEL_BUFFER_OFFSET_MASK	((uintptr_t)ACCEL_BUFFER_BASE - 1)

#define ACCEL_CRYPTO_TWEAK_MODE_DEFAULT	SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA
#define ACCEL_TASKS_IN_SEQUENCE_LIMIT	8

struct accel_module {
	struct spdk_accel_module_if	*module;
	bool				supports_memory_domains;
};

/* Largest context size for all accel modules */
static size_t g_max_accel_module_size = sizeof(struct spdk_accel_task);

static struct spdk_accel_module_if *g_accel_module = NULL;
static spdk_accel_fini_cb g_fini_cb_fn = NULL;
static void *g_fini_cb_arg = NULL;
static bool g_modules_started = false;
static struct spdk_memory_domain *g_accel_domain;

/* Global list of registered accelerator modules */
static TAILQ_HEAD(, spdk_accel_module_if) spdk_accel_module_list =
	TAILQ_HEAD_INITIALIZER(spdk_accel_module_list);

/* Crypto keyring */
static TAILQ_HEAD(, spdk_accel_crypto_key) g_keyring = TAILQ_HEAD_INITIALIZER(g_keyring);
static struct spdk_spinlock g_keyring_spin;

/* Global array mapping capabilities to modules */
static struct accel_module g_modules_opc[SPDK_ACCEL_OPC_LAST] = {};
static char *g_modules_opc_override[SPDK_ACCEL_OPC_LAST] = {};
TAILQ_HEAD(, spdk_accel_driver) g_accel_drivers = TAILQ_HEAD_INITIALIZER(g_accel_drivers);
static struct spdk_accel_driver *g_accel_driver;
static struct spdk_accel_opts g_opts = {
	.small_cache_size = ACCEL_SMALL_CACHE_SIZE,
	.large_cache_size = ACCEL_LARGE_CACHE_SIZE,
	.task_count = ACCEL_TASKS_PER_CHANNEL,
	.sequence_count = ACCEL_TASKS_PER_CHANNEL,
	.buf_count = ACCEL_TASKS_PER_CHANNEL,
};
static struct accel_stats g_stats;
static struct spdk_spinlock g_stats_lock;

static const char *g_opcode_strings[SPDK_ACCEL_OPC_LAST] = {
	"copy", "fill", "dualcast", "compare", "crc32c", "copy_crc32c",
	"compress", "decompress", "encrypt", "decrypt", "xor",
	"dif_verify", "dif_verify_copy", "dif_generate", "dif_generate_copy"
};

enum accel_sequence_state {
	ACCEL_SEQUENCE_STATE_INIT,
	ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF,
	ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF,
	ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF,
	ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF,
	ACCEL_SEQUENCE_STATE_PULL_DATA,
	ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA,
	ACCEL_SEQUENCE_STATE_EXEC_TASK,
	ACCEL_SEQUENCE_STATE_AWAIT_TASK,
	ACCEL_SEQUENCE_STATE_COMPLETE_TASK,
	ACCEL_SEQUENCE_STATE_NEXT_TASK,
	ACCEL_SEQUENCE_STATE_PUSH_DATA,
	ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA,
	ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS,
	ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS,
	ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS,
	ACCEL_SEQUENCE_STATE_ERROR,
	ACCEL_SEQUENCE_STATE_MAX,
};

static const char *g_seq_states[]
__attribute__((unused)) = {
	[ACCEL_SEQUENCE_STATE_INIT] = "init",
	[ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF] = "check-virtbuf",
	[ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF] = "await-virtbuf",
	[ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF] = "check-bouncebuf",
	[ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF] = "await-bouncebuf",
	[ACCEL_SEQUENCE_STATE_PULL_DATA] = "pull-data",
	[ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA] = "await-pull-data",
	[ACCEL_SEQUENCE_STATE_EXEC_TASK] = "exec-task",
	[ACCEL_SEQUENCE_STATE_AWAIT_TASK] = "await-task",
	[ACCEL_SEQUENCE_STATE_COMPLETE_TASK] = "complete-task",
	[ACCEL_SEQUENCE_STATE_NEXT_TASK] = "next-task",
	[ACCEL_SEQUENCE_STATE_PUSH_DATA] = "push-data",
	[ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA] = "await-push-data",
	[ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS] = "driver-exec-tasks",
	[ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS] = "driver-await-tasks",
	[ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS] = "driver-complete-tasks",
	[ACCEL_SEQUENCE_STATE_ERROR] = "error",
	[ACCEL_SEQUENCE_STATE_MAX] = "",
};

#define ACCEL_SEQUENCE_STATE_STRING(s) \
	(((s) >= ACCEL_SEQUENCE_STATE_INIT && (s) < ACCEL_SEQUENCE_STATE_MAX) \
	 ? g_seq_states[s] : "unknown")

struct accel_buffer {
	struct spdk_accel_sequence	*seq;
	void				*buf;
	uint64_t			len;
	struct spdk_iobuf_entry		iobuf;
	spdk_accel_sequence_get_buf_cb	cb_fn;
	void				*cb_ctx;
	SLIST_ENTRY(accel_buffer)	link;
	struct accel_io_channel		*ch;
};

struct accel_io_channel {
	struct spdk_io_channel			*module_ch[SPDK_ACCEL_OPC_LAST];
	struct spdk_io_channel			*driver_channel;
	void					*task_pool_base;
	struct spdk_accel_sequence		*seq_pool_base;
	struct accel_buffer			*buf_pool_base;
	struct spdk_accel_task_aux_data		*task_aux_data_base;
	STAILQ_HEAD(, spdk_accel_task)		task_pool;
	SLIST_HEAD(, spdk_accel_task_aux_data)	task_aux_data_pool;
	SLIST_HEAD(, spdk_accel_sequence)	seq_pool;
	SLIST_HEAD(, accel_buffer)		buf_pool;
	struct spdk_iobuf_channel		iobuf;
	struct accel_stats			stats;
};

TAILQ_HEAD(accel_sequence_tasks, spdk_accel_task);

struct spdk_accel_sequence {
	struct accel_io_channel			*ch;
	struct accel_sequence_tasks		tasks;
	SLIST_HEAD(, accel_buffer)		bounce_bufs;
	int					status;
	/* state uses enum accel_sequence_state */
	uint8_t					state;
	bool					in_process_sequence;
	spdk_accel_completion_cb		cb_fn;
	void					*cb_arg;
	SLIST_ENTRY(spdk_accel_sequence)	link;
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_accel_sequence) == 64, "invalid size");

#define accel_update_stats(ch, event, v) \
	do { \
		(ch)->stats.event += (v); \
	} while (0)

#define accel_update_task_stats(ch, task, event, v) \
	accel_update_stats(ch, operations[(task)->op_code].event, v)
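/*
 * Illustrative expansion (editor's sketch, not part of the original file):
 * for a copy task, accel_update_task_stats(ch, task, executed, 1) expands to
 *
 *	(ch)->stats.operations[SPDK_ACCEL_OPC_COPY].executed += 1;
 *
 * i.e. per-opcode counters live in the channel's stats.operations array,
 * indexed by the task's op_code.
 */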
static inline void accel_sequence_task_cb(void *cb_arg, int status);

static inline void
accel_sequence_set_state(struct spdk_accel_sequence *seq, enum accel_sequence_state state)
{
	SPDK_DEBUGLOG(accel, "seq=%p, setting state: %s -> %s\n", seq,
		      ACCEL_SEQUENCE_STATE_STRING(seq->state), ACCEL_SEQUENCE_STATE_STRING(state));
	assert(seq->state != ACCEL_SEQUENCE_STATE_ERROR || state == ACCEL_SEQUENCE_STATE_ERROR);
	seq->state = state;
}

static void
accel_sequence_set_fail(struct spdk_accel_sequence *seq, int status)
{
	accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
	assert(status != 0);
	seq->status = status;
}

int
spdk_accel_get_opc_module_name(enum spdk_accel_opcode opcode, const char **module_name)
{
	if (opcode >= SPDK_ACCEL_OPC_LAST) {
		/* invalid opcode */
		return -EINVAL;
	}

	if (g_modules_opc[opcode].module) {
		*module_name = g_modules_opc[opcode].module->name;
	} else {
		return -ENOENT;
	}

	return 0;
}

void
_accel_for_each_module(struct module_info *info, _accel_for_each_module_fn fn)
{
	struct spdk_accel_module_if *accel_module;
	enum spdk_accel_opcode opcode;
	int j = 0;

	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		for (opcode = 0; opcode < SPDK_ACCEL_OPC_LAST; opcode++) {
			if (accel_module->supports_opcode(opcode)) {
				info->ops[j] = opcode;
				j++;
			}
		}
		info->name = accel_module->name;
		info->num_ops = j;
		fn(info);
		j = 0;
	}
}

const char *
spdk_accel_get_opcode_name(enum spdk_accel_opcode opcode)
{
	if (opcode < SPDK_ACCEL_OPC_LAST) {
		return g_opcode_strings[opcode];
	}

	return NULL;
}
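/*
 * Usage sketch (hypothetical caller, not part of this file): query which
 * module services an opcode before relying on it.
 *
 *	const char *name;
 *
 *	if (spdk_accel_get_opc_module_name(SPDK_ACCEL_OPC_COPY, &name) == 0) {
 *		printf("copy is handled by %s\n", name);
 *	}
 */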
int
spdk_accel_assign_opc(enum spdk_accel_opcode opcode, const char *name)
{
	char *copy;

	if (g_modules_started == true) {
		/* we don't allow re-assignment once things have started */
		return -EINVAL;
	}

	if (opcode >= SPDK_ACCEL_OPC_LAST) {
		/* invalid opcode */
		return -EINVAL;
	}

	copy = strdup(name);
	if (copy == NULL) {
		return -ENOMEM;
	}

	/* module selection will be validated after the framework starts. */
	free(g_modules_opc_override[opcode]);
	g_modules_opc_override[opcode] = copy;

	return 0;
}

inline static struct spdk_accel_task *
_get_task(struct accel_io_channel *accel_ch, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct spdk_accel_task *accel_task;

	accel_task = STAILQ_FIRST(&accel_ch->task_pool);
	if (spdk_unlikely(accel_task == NULL)) {
		accel_update_stats(accel_ch, retry.task, 1);
		return NULL;
	}

	accel_update_stats(accel_ch, task_outstanding, 1);
	STAILQ_REMOVE_HEAD(&accel_ch->task_pool, link);
	accel_task->link.stqe_next = NULL;

	accel_task->cb_fn = cb_fn;
	accel_task->cb_arg = cb_arg;
	accel_task->accel_ch = accel_ch;
	accel_task->s.iovs = NULL;
	accel_task->d.iovs = NULL;

	return accel_task;
}

static void
_put_task(struct accel_io_channel *ch, struct spdk_accel_task *task)
{
	STAILQ_INSERT_HEAD(&ch->task_pool, task, link);
	accel_update_stats(ch, task_outstanding, -1);
}

void
spdk_accel_task_complete(struct spdk_accel_task *accel_task, int status)
{
	struct accel_io_channel *accel_ch = accel_task->accel_ch;
	spdk_accel_completion_cb cb_fn;
	void *cb_arg;

	accel_update_task_stats(accel_ch, accel_task, executed, 1);
	accel_update_task_stats(accel_ch, accel_task, num_bytes, accel_task->nbytes);
	if (spdk_unlikely(status != 0)) {
		accel_update_task_stats(accel_ch, accel_task, failed, 1);
	}

	if (accel_task->seq) {
		accel_sequence_task_cb(accel_task->seq, status);
		return;
	}

	cb_fn = accel_task->cb_fn;
	cb_arg = accel_task->cb_arg;

	if (accel_task->has_aux) {
		SLIST_INSERT_HEAD(&accel_ch->task_aux_data_pool, accel_task->aux, link);
		accel_task->aux = NULL;
		accel_task->has_aux = false;
	}

	/* Return the accel_task to the pool first, so that the pool isn't
	 * exhausted if the user's callback (cb_fn) recursively allocates
	 * another accel_task.
	 */
	_put_task(accel_ch, accel_task);

	cb_fn(cb_arg, status);
}

static inline int
accel_submit_task(struct accel_io_channel *accel_ch, struct spdk_accel_task *task)
{
	struct spdk_io_channel *module_ch = accel_ch->module_ch[task->op_code];
	struct spdk_accel_module_if *module = g_modules_opc[task->op_code].module;
	int rc;

	rc = module->submit_tasks(module_ch, task);
	if (spdk_unlikely(rc != 0)) {
		accel_update_task_stats(accel_ch, task, failed, 1);
	}

	return rc;
}

static inline uint64_t
accel_get_iovlen(struct iovec *iovs, uint32_t iovcnt)
{
	uint64_t result = 0;
	uint32_t i;

	for (i = 0; i < iovcnt; ++i) {
		result += iovs[i].iov_len;
	}

	return result;
}

#define ACCEL_TASK_ALLOC_AUX_BUF(task)						\
do {										\
	(task)->aux = SLIST_FIRST(&(task)->accel_ch->task_aux_data_pool);	\
	if (spdk_unlikely(!(task)->aux)) {					\
		SPDK_ERRLOG("Fatal problem, aux data was not allocated\n");	\
		_put_task(task->accel_ch, task);				\
		assert(0);							\
		return -ENOMEM;							\
	}									\
	SLIST_REMOVE_HEAD(&(task)->accel_ch->task_aux_data_pool, link);		\
	(task)->has_aux = true;							\
} while (0)
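/*
 * Submission pattern shared by the public API below, sketched for a
 * hypothetical caller (assumes a channel obtained via
 * spdk_accel_get_io_channel()):
 *
 *	static void copy_done(void *cb_arg, int status) { ... }
 *	...
 *	rc = spdk_accel_submit_copy(ch, dst, src, len, copy_done, ctx);
 *	if (rc == -ENOMEM) {
 *		per-channel task pool is empty; retry once tasks complete
 *	}
 */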
/* Accel framework public API for copy function */
int
spdk_accel_submit_copy(struct spdk_io_channel *ch, void *dst, void *src,
		       uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->op_code = SPDK_ACCEL_OPC_COPY;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for dual cast copy function */
int
spdk_accel_submit_dualcast(struct spdk_io_channel *ch, void *dst1,
			   void *dst2, void *src, uint64_t nbytes,
			   spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	if ((uintptr_t)dst1 & (ALIGN_4K - 1) || (uintptr_t)dst2 & (ALIGN_4K - 1)) {
		SPDK_ERRLOG("Dualcast requires 4K alignment on dst addresses\n");
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d2.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST2];
	accel_task->d.iovs[0].iov_base = dst1;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->d2.iovs[0].iov_base = dst2;
	accel_task->d2.iovs[0].iov_len = nbytes;
	accel_task->d2.iovcnt = 1;
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->op_code = SPDK_ACCEL_OPC_DUALCAST;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for compare function */

int
spdk_accel_submit_compare(struct spdk_io_channel *ch, void *src1,
			  void *src2, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
			  void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->s2.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC2];
	accel_task->s.iovs[0].iov_base = src1;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->s2.iovs[0].iov_base = src2;
	accel_task->s2.iovs[0].iov_len = nbytes;
	accel_task->s2.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->op_code = SPDK_ACCEL_OPC_COMPARE;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;

	return accel_submit_task(accel_ch, accel_task);
}
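/*
 * Note on the fill API below (editor's addition): the single fill byte is
 * replicated across a uint64_t via memset(&task->fill_pattern, fill, 8), so
 * fill = 0xAB yields the pattern 0xABABABABABABABAB, which modules can write
 * out in 8-byte chunks.
 */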
/* Accel framework public API for fill function */
int
spdk_accel_submit_fill(struct spdk_io_channel *ch, void *dst,
		       uint8_t fill, uint64_t nbytes,
		       spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->nbytes = nbytes;
	memset(&accel_task->fill_pattern, fill, sizeof(uint64_t));
	accel_task->op_code = SPDK_ACCEL_OPC_FILL;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for CRC-32C function */
int
spdk_accel_submit_crc32c(struct spdk_io_channel *ch, uint32_t *crc_dst,
			 void *src, uint32_t seed, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
			 void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->op_code = SPDK_ACCEL_OPC_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for chained CRC-32C function */
int
spdk_accel_submit_crc32cv(struct spdk_io_channel *ch, uint32_t *crc_dst,
			  struct iovec *iov, uint32_t iov_cnt, uint32_t seed,
			  spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	if (iov == NULL) {
		SPDK_ERRLOG("iov should not be NULL\n");
		return -EINVAL;
	}

	if (!iov_cnt) {
		SPDK_ERRLOG("iov_cnt should not be zero\n");
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		SPDK_ERRLOG("no memory\n");
		assert(0);
		return -ENOMEM;
	}

	accel_task->s.iovs = iov;
	accel_task->s.iovcnt = iov_cnt;
	accel_task->nbytes = accel_get_iovlen(iov, iov_cnt);
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->op_code = SPDK_ACCEL_OPC_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;

	return accel_submit_task(accel_ch, accel_task);
}
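/*
 * Usage sketch for the vectored variant above (hypothetical caller):
 *
 *	struct iovec iovs[2] = {
 *		{ .iov_base = hdr, .iov_len = sizeof(hdr) },
 *		{ .iov_base = payload, .iov_len = payload_len },
 *	};
 *	uint32_t crc;
 *
 *	rc = spdk_accel_submit_crc32cv(ch, &crc, iovs, 2, ~0u, crc_done, ctx);
 *
 * seed is the initial CRC value (callers commonly pass ~0u); crc must remain
 * valid until the completion callback runs.
 */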
/* Accel framework public API for copy with CRC-32C function */
int
spdk_accel_submit_copy_crc32c(struct spdk_io_channel *ch, void *dst,
			      void *src, uint32_t *crc_dst, uint32_t seed, uint64_t nbytes,
			      spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->op_code = SPDK_ACCEL_OPC_COPY_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for chained copy + CRC-32C function */
int
spdk_accel_submit_copy_crc32cv(struct spdk_io_channel *ch, void *dst,
			       struct iovec *src_iovs, uint32_t iov_cnt, uint32_t *crc_dst,
			       uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	uint64_t nbytes;

	if (src_iovs == NULL) {
		SPDK_ERRLOG("iov should not be NULL\n");
		return -EINVAL;
	}

	if (!iov_cnt) {
		SPDK_ERRLOG("iov_cnt should not be zero\n");
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		SPDK_ERRLOG("no memory\n");
		assert(0);
		return -ENOMEM;
	}

	nbytes = accel_get_iovlen(src_iovs, iov_cnt);

	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = iov_cnt;
	accel_task->nbytes = nbytes;
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->op_code = SPDK_ACCEL_OPC_COPY_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_compress(struct spdk_io_channel *ch, void *dst, uint64_t nbytes,
			   struct iovec *src_iovs, size_t src_iovcnt, uint32_t *output_size,
			   spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->output_size = output_size;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->nbytes = nbytes;
	accel_task->op_code = SPDK_ACCEL_OPC_COMPRESS;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;

	return accel_submit_task(accel_ch, accel_task);
}
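/*
 * Semantics note (editor's reading of the code above): for compress, dst is a
 * single contiguous buffer with capacity nbytes, while the source may be
 * scattered across src_iovs; on successful completion, *output_size holds the
 * number of compressed bytes actually produced.
 */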
int
spdk_accel_submit_decompress(struct spdk_io_channel *ch, struct iovec *dst_iovs,
			     size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
			     uint32_t *output_size, spdk_accel_completion_cb cb_fn,
			     void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->output_size = output_size;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	accel_task->op_code = SPDK_ACCEL_OPC_DECOMPRESS;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_encrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  uint64_t iv, uint32_t block_size,
			  spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->crypto_key = key;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	accel_task->iv = iv;
	accel_task->block_size = block_size;
	accel_task->op_code = SPDK_ACCEL_OPC_ENCRYPT;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_decrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  uint64_t iv, uint32_t block_size,
			  spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->crypto_key = key;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	accel_task->iv = iv;
	accel_task->block_size = block_size;
	accel_task->op_code = SPDK_ACCEL_OPC_DECRYPT;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;

	return accel_submit_task(accel_ch, accel_task);
}
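/*
 * Worked example for the crypto entry points above (editor's illustration):
 * with block_size = 512 and an 8 KiB source, the data spans 16 crypto blocks.
 * iv seeds the tweak of the first block; with the default tweak mode
 * (ACCEL_CRYPTO_TWEAK_MODE_DEFAULT, i.e. SIMPLE_LBA), callers typically pass
 * the starting LBA, and the tweak advances per block.
 */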
int
spdk_accel_submit_xor(struct spdk_io_channel *ch, void *dst, void **sources, uint32_t nsrcs,
		      uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->nsrcs.srcs = sources;
	accel_task->nsrcs.cnt = nsrcs;
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->op_code = SPDK_ACCEL_OPC_XOR;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_dif_verify(struct spdk_io_channel *ch,
			     struct iovec *iovs, size_t iovcnt, uint32_t num_blocks,
			     const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err,
			     spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->s.iovs = iovs;
	accel_task->s.iovcnt = iovcnt;
	accel_task->dif.ctx = ctx;
	accel_task->dif.err = err;
	accel_task->dif.num_blocks = num_blocks;
	accel_task->nbytes = num_blocks * ctx->block_size;
	accel_task->op_code = SPDK_ACCEL_OPC_DIF_VERIFY;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_dif_generate(struct spdk_io_channel *ch,
			       struct iovec *iovs, size_t iovcnt, uint32_t num_blocks,
			       const struct spdk_dif_ctx *ctx,
			       spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->s.iovs = iovs;
	accel_task->s.iovcnt = iovcnt;
	accel_task->dif.ctx = ctx;
	accel_task->dif.num_blocks = num_blocks;
	accel_task->nbytes = num_blocks * ctx->block_size;
	accel_task->op_code = SPDK_ACCEL_OPC_DIF_GENERATE;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_dif_generate_copy(struct spdk_io_channel *ch, struct iovec *dst_iovs,
				    size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
				    uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
				    spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->dif.ctx = ctx;
	accel_task->dif.num_blocks = num_blocks;
	accel_task->nbytes = num_blocks * ctx->block_size;
	accel_task->op_code = SPDK_ACCEL_OPC_DIF_GENERATE_COPY;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;

	return accel_submit_task(accel_ch, accel_task);
}
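/*
 * Sizing note for the DIF entry points in this file (editor's example):
 * nbytes is computed as num_blocks * ctx->block_size, so e.g. num_blocks = 8
 * with a 520-byte extended block (512 bytes of data + 8 bytes of DIF) gives
 * nbytes = 4160.
 */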
int
spdk_accel_submit_dif_verify_copy(struct spdk_io_channel *ch,
				  struct iovec *dst_iovs, size_t dst_iovcnt,
				  struct iovec *src_iovs, size_t src_iovcnt, uint32_t num_blocks,
				  const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err,
				  spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->dif.ctx = ctx;
	accel_task->dif.err = err;
	accel_task->dif.num_blocks = num_blocks;
	accel_task->nbytes = num_blocks * ctx->block_size;
	accel_task->op_code = SPDK_ACCEL_OPC_DIF_VERIFY_COPY;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

static inline struct accel_buffer *
accel_get_buf(struct accel_io_channel *ch, uint64_t len)
{
	struct accel_buffer *buf;

	buf = SLIST_FIRST(&ch->buf_pool);
	if (spdk_unlikely(buf == NULL)) {
		accel_update_stats(ch, retry.bufdesc, 1);
		return NULL;
	}

	SLIST_REMOVE_HEAD(&ch->buf_pool, link);
	buf->len = len;
	buf->buf = NULL;
	buf->seq = NULL;
	buf->cb_fn = NULL;

	return buf;
}

static inline void
accel_put_buf(struct accel_io_channel *ch, struct accel_buffer *buf)
{
	if (buf->buf != NULL) {
		spdk_iobuf_put(&ch->iobuf, buf->buf, buf->len);
	}

	SLIST_INSERT_HEAD(&ch->buf_pool, buf, link);
}
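/*
 * Note (editor's summary): accel_get_buf() only hands out a buffer
 * descriptor; buf->buf stays NULL until backing memory is obtained from the
 * iobuf pool (see accel_sequence_alloc_buf() below), so allocating actual
 * data buffers is deferred until a sequence really needs them.
 */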
static inline struct spdk_accel_sequence *
accel_sequence_get(struct accel_io_channel *ch)
{
	struct spdk_accel_sequence *seq;

	assert(g_opts.task_count >= ch->stats.task_outstanding);

	/* A sequence cannot be allocated if the number of available task objects cannot satisfy
	 * the required limit. This prevents a potential deadlock where a few requests are pending
	 * task resources and none of them can advance the processing. This works only if there is
	 * a single async operation after the sequence object is obtained (currently the io buffer
	 * allocation); if more async operations are added, this solution needs to be improved.
	 */
	if (spdk_unlikely(g_opts.task_count - ch->stats.task_outstanding <
			  ACCEL_TASKS_IN_SEQUENCE_LIMIT)) {
		return NULL;
	}

	seq = SLIST_FIRST(&ch->seq_pool);
	if (spdk_unlikely(seq == NULL)) {
		accel_update_stats(ch, retry.sequence, 1);
		return NULL;
	}

	accel_update_stats(ch, sequence_outstanding, 1);
	SLIST_REMOVE_HEAD(&ch->seq_pool, link);

	TAILQ_INIT(&seq->tasks);
	SLIST_INIT(&seq->bounce_bufs);

	seq->ch = ch;
	seq->status = 0;
	seq->state = ACCEL_SEQUENCE_STATE_INIT;
	seq->in_process_sequence = false;

	return seq;
}

static inline void
accel_sequence_put(struct spdk_accel_sequence *seq)
{
	struct accel_io_channel *ch = seq->ch;
	struct accel_buffer *buf;

	while (!SLIST_EMPTY(&seq->bounce_bufs)) {
		buf = SLIST_FIRST(&seq->bounce_bufs);
		SLIST_REMOVE_HEAD(&seq->bounce_bufs, link);
		accel_put_buf(seq->ch, buf);
	}

	assert(TAILQ_EMPTY(&seq->tasks));
	seq->ch = NULL;

	SLIST_INSERT_HEAD(&ch->seq_pool, seq, link);
	accel_update_stats(ch, sequence_outstanding, -1);
}

static void accel_sequence_task_cb(void *cb_arg, int status);

static inline struct spdk_accel_task *
accel_sequence_get_task(struct accel_io_channel *ch, struct spdk_accel_sequence *seq,
			spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct spdk_accel_task *task;

	task = _get_task(ch, NULL, NULL);
	if (spdk_unlikely(task == NULL)) {
		return task;
	}

	task->step_cb_fn = cb_fn;
	task->cb_arg = cb_arg;
	task->seq = seq;

	return task;
}

int
spdk_accel_append_copy(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
		       struct iovec *dst_iovs, uint32_t dst_iovcnt,
		       struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
		       struct iovec *src_iovs, uint32_t src_iovcnt,
		       struct spdk_memory_domain *src_domain, void *src_domain_ctx,
		       spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	task->op_code = SPDK_ACCEL_OPC_COPY;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}
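/*
 * Sequence usage sketch (hypothetical caller; spdk_accel_sequence_finish(),
 * declared in spdk/accel.h, is the public API for executing a built-up
 * sequence):
 *
 *	struct spdk_accel_sequence *seq = NULL;
 *
 *	rc = spdk_accel_append_copy(&seq, ch, dst_iovs, 1, NULL, NULL,
 *				    src_iovs, 1, NULL, NULL, step_done, ctx);
 *	... append more steps ...
 *	spdk_accel_sequence_finish(seq, seq_done, ctx);
 *
 * Passing *pseq == NULL allocates a new sequence; on success it is returned
 * through pseq so that subsequent appends attach to the same sequence.
 */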
int
spdk_accel_append_fill(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
		       void *buf, uint64_t len,
		       struct spdk_memory_domain *domain, void *domain_ctx, uint8_t pattern,
		       spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	memset(&task->fill_pattern, pattern, sizeof(uint64_t));

	task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
	if (spdk_unlikely(!task->aux)) {
		SPDK_ERRLOG("Fatal problem, aux data was not allocated\n");
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		task->seq = NULL;
		_put_task(task->accel_ch, task);
		assert(0);
		return -ENOMEM;
	}
	SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
	task->has_aux = true;

	task->d.iovs = &task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
	task->d.iovs[0].iov_base = buf;
	task->d.iovs[0].iov_len = len;
	task->d.iovcnt = 1;
	task->nbytes = len;
	task->src_domain = NULL;
	task->dst_domain = domain;
	task->dst_domain_ctx = domain_ctx;
	task->op_code = SPDK_ACCEL_OPC_FILL;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_append_decompress(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
			     struct iovec *dst_iovs, size_t dst_iovcnt,
			     struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			     struct iovec *src_iovs, size_t src_iovcnt,
			     struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			     spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	/* TODO: support output_size for chaining */
	task->output_size = NULL;
	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	task->op_code = SPDK_ACCEL_OPC_DECOMPRESS;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}
int
spdk_accel_append_encrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
			  struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			  uint64_t iv, uint32_t block_size,
			  spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	assert(dst_iovs && dst_iovcnt && src_iovs && src_iovcnt && key && block_size);

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	task->crypto_key = key;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	task->iv = iv;
	task->block_size = block_size;
	task->op_code = SPDK_ACCEL_OPC_ENCRYPT;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_append_decrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
			  struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			  uint64_t iv, uint32_t block_size,
			  spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	assert(dst_iovs && dst_iovcnt && src_iovs && src_iovcnt && key && block_size);

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	task->crypto_key = key;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	task->iv = iv;
	task->block_size = block_size;
	task->op_code = SPDK_ACCEL_OPC_DECRYPT;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}
int
spdk_accel_append_crc32c(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
			 uint32_t *dst, struct iovec *iovs, uint32_t iovcnt,
			 struct spdk_memory_domain *domain, void *domain_ctx,
			 uint32_t seed, spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	task->s.iovs = iovs;
	task->s.iovcnt = iovcnt;
	task->src_domain = domain;
	task->src_domain_ctx = domain_ctx;
	task->nbytes = accel_get_iovlen(iovs, iovcnt);
	task->crc_dst = dst;
	task->seed = seed;
	task->op_code = SPDK_ACCEL_OPC_CRC32C;
	task->dst_domain = NULL;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_get_buf(struct spdk_io_channel *ch, uint64_t len, void **buf,
		   struct spdk_memory_domain **domain, void **domain_ctx)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct accel_buffer *accel_buf;

	accel_buf = accel_get_buf(accel_ch, len);
	if (spdk_unlikely(accel_buf == NULL)) {
		return -ENOMEM;
	}

	accel_buf->ch = accel_ch;

	/* We always return the same pointer and identify the buffers through domain_ctx */
	*buf = ACCEL_BUFFER_BASE;
	*domain_ctx = accel_buf;
	*domain = g_accel_domain;

	return 0;
}

void
spdk_accel_put_buf(struct spdk_io_channel *ch, void *buf,
		   struct spdk_memory_domain *domain, void *domain_ctx)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct accel_buffer *accel_buf = domain_ctx;

	assert(domain == g_accel_domain);
	assert(buf == ACCEL_BUFFER_BASE);

	accel_put_buf(accel_ch, accel_buf);
}

static void
accel_sequence_complete_task(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	struct accel_io_channel *ch = seq->ch;
	spdk_accel_step_cb cb_fn;
	void *cb_arg;

	TAILQ_REMOVE(&seq->tasks, task, seq_link);
	cb_fn = task->step_cb_fn;
	cb_arg = task->cb_arg;
	task->seq = NULL;
	if (task->has_aux) {
		SLIST_INSERT_HEAD(&ch->task_aux_data_pool, task->aux, link);
		task->aux = NULL;
		task->has_aux = false;
	}

	_put_task(ch, task);

	if (cb_fn != NULL) {
		cb_fn(cb_arg);
	}
}

static void
accel_sequence_complete_tasks(struct spdk_accel_sequence *seq)
{
	struct spdk_accel_task *task;

	while (!TAILQ_EMPTY(&seq->tasks)) {
		task = TAILQ_FIRST(&seq->tasks);
		accel_sequence_complete_task(seq, task);
	}
}

static void
accel_sequence_complete(struct spdk_accel_sequence *seq)
{
	spdk_accel_completion_cb cb_fn = seq->cb_fn;
	void *cb_arg = seq->cb_arg;
	int status = seq->status;

	SPDK_DEBUGLOG(accel, "Completed sequence: %p with status: %d\n", seq, status);

	accel_update_stats(seq->ch, sequence_executed, 1);
	if (spdk_unlikely(status != 0)) {
		accel_update_stats(seq->ch, sequence_failed, 1);
	}

	/* First notify all users that appended operations to this sequence */
	accel_sequence_complete_tasks(seq);
	accel_sequence_put(seq);

	/* Then notify the user that finished the sequence */
	cb_fn(cb_arg, status);
}

static void
accel_update_virt_iov(struct iovec *diov, struct iovec *siov, struct accel_buffer *accel_buf)
{
	uintptr_t offset;

	offset = (uintptr_t)siov->iov_base & ACCEL_BUFFER_OFFSET_MASK;
	assert(offset < accel_buf->len);

	diov->iov_base = (char *)accel_buf->buf + offset;
	diov->iov_len = siov->iov_len;
}
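/*
 * Worked example (editor's illustration): spdk_accel_get_buf() hands out
 * ACCEL_BUFFER_BASE, so if the user advanced the virtual pointer by 0x100,
 * siov->iov_base == (char *)ACCEL_BUFFER_BASE + 0x100 and the masked offset
 * is 0x100; diov then points 0x100 bytes into the real backing buffer.
 */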
static void
accel_sequence_set_virtbuf(struct spdk_accel_sequence *seq, struct accel_buffer *buf)
{
	struct spdk_accel_task *task;
	struct iovec *iov;

	/* Now that we've allocated the actual data buffer for this accel_buffer, update all tasks
	 * in a sequence that were using it.
	 */
	TAILQ_FOREACH(task, &seq->tasks, seq_link) {
		if (task->src_domain == g_accel_domain && task->src_domain_ctx == buf) {
			if (!task->has_aux) {
				task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
				assert(task->aux && "Can't allocate aux data structure");
				task->has_aux = true;
				SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
			}

			iov = &task->aux->iovs[SPDK_ACCEL_AXU_IOV_VIRT_SRC];
			assert(task->s.iovcnt == 1);
			accel_update_virt_iov(iov, &task->s.iovs[0], buf);
			task->src_domain = NULL;
			task->s.iovs = iov;
		}
		if (task->dst_domain == g_accel_domain && task->dst_domain_ctx == buf) {
			if (!task->has_aux) {
				task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
				assert(task->aux && "Can't allocate aux data structure");
				task->has_aux = true;
				SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
			}

			iov = &task->aux->iovs[SPDK_ACCEL_AXU_IOV_VIRT_DST];
			assert(task->d.iovcnt == 1);
			accel_update_virt_iov(iov, &task->d.iovs[0], buf);
			task->dst_domain = NULL;
			task->d.iovs = iov;
		}
	}
}

static void accel_process_sequence(struct spdk_accel_sequence *seq);

static void
accel_iobuf_get_virtbuf_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);

	assert(accel_buf->seq != NULL);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
	accel_process_sequence(accel_buf->seq);
}

static bool
accel_sequence_alloc_buf(struct spdk_accel_sequence *seq, struct accel_buffer *buf,
			 spdk_iobuf_get_cb cb_fn)
{
	struct accel_io_channel *ch = seq->ch;

	assert(buf->seq == NULL);

	buf->seq = seq;

	/* Buffer might be already allocated by memory domain translation. */
	if (buf->buf) {
		return true;
	}

	buf->buf = spdk_iobuf_get(&ch->iobuf, buf->len, &buf->iobuf, cb_fn);
	if (spdk_unlikely(buf->buf == NULL)) {
		accel_update_stats(ch, retry.iobuf, 1);
		return false;
	}

	return true;
}
static bool
accel_sequence_check_virtbuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	/* If a task doesn't have dst/src (e.g. fill, crc32), its dst/src domain should be set to
	 * NULL */
	if (task->src_domain == g_accel_domain) {
		if (!accel_sequence_alloc_buf(seq, task->src_domain_ctx,
					      accel_iobuf_get_virtbuf_cb)) {
			return false;
		}

		accel_sequence_set_virtbuf(seq, task->src_domain_ctx);
	}

	if (task->dst_domain == g_accel_domain) {
		if (!accel_sequence_alloc_buf(seq, task->dst_domain_ctx,
					      accel_iobuf_get_virtbuf_cb)) {
			return false;
		}

		accel_sequence_set_virtbuf(seq, task->dst_domain_ctx);
	}

	return true;
}

static void
accel_sequence_get_buf_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);

	assert(accel_buf->seq != NULL);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
	accel_buf->cb_fn(accel_buf->seq, accel_buf->cb_ctx);
}

bool
spdk_accel_alloc_sequence_buf(struct spdk_accel_sequence *seq, void *buf,
			      struct spdk_memory_domain *domain, void *domain_ctx,
			      spdk_accel_sequence_get_buf_cb cb_fn, void *cb_ctx)
{
	struct accel_buffer *accel_buf = domain_ctx;

	assert(domain == g_accel_domain);
	accel_buf->cb_fn = cb_fn;
	accel_buf->cb_ctx = cb_ctx;

	if (!accel_sequence_alloc_buf(seq, accel_buf, accel_sequence_get_buf_cb)) {
		return false;
	}

	accel_sequence_set_virtbuf(seq, accel_buf);

	return true;
}

struct spdk_accel_task *
spdk_accel_sequence_first_task(struct spdk_accel_sequence *seq)
{
	return TAILQ_FIRST(&seq->tasks);
}

struct spdk_accel_task *
spdk_accel_sequence_next_task(struct spdk_accel_task *task)
{
	return TAILQ_NEXT(task, seq_link);
}
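/*
 * Iteration sketch (hypothetical accel driver): drivers walk a sequence's
 * outstanding tasks with the two accessors above.
 *
 *	struct spdk_accel_task *task;
 *
 *	for (task = spdk_accel_sequence_first_task(seq); task != NULL;
 *	     task = spdk_accel_sequence_next_task(task)) {
 *		... inspect task->op_code and the s/d iovecs ...
 *	}
 */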
static inline void
accel_set_bounce_buffer(struct spdk_accel_bounce_buffer *bounce, struct iovec **iovs,
			uint32_t *iovcnt, struct spdk_memory_domain **domain, void **domain_ctx,
			struct accel_buffer *buf)
{
	bounce->orig_iovs = *iovs;
	bounce->orig_iovcnt = *iovcnt;
	bounce->orig_domain = *domain;
	bounce->orig_domain_ctx = *domain_ctx;
	bounce->iov.iov_base = buf->buf;
	bounce->iov.iov_len = buf->len;

	*iovs = &bounce->iov;
	*iovcnt = 1;
	*domain = NULL;
}

static void
accel_iobuf_get_src_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct spdk_accel_task *task;
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	task = TAILQ_FIRST(&accel_buf->seq->tasks);
	assert(task != NULL);

	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
	assert(task->aux);
	assert(task->has_aux);
	accel_set_bounce_buffer(&task->aux->bounce.s, &task->s.iovs, &task->s.iovcnt, &task->src_domain,
				&task->src_domain_ctx, accel_buf);
	accel_process_sequence(accel_buf->seq);
}

static void
accel_iobuf_get_dst_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct spdk_accel_task *task;
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	task = TAILQ_FIRST(&accel_buf->seq->tasks);
	assert(task != NULL);

	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
	assert(task->aux);
	assert(task->has_aux);
	accel_set_bounce_buffer(&task->aux->bounce.d, &task->d.iovs, &task->d.iovcnt, &task->dst_domain,
				&task->dst_domain_ctx, accel_buf);
	accel_process_sequence(accel_buf->seq);
}

static int
accel_sequence_check_bouncebuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	struct accel_buffer *buf;

	if (task->src_domain != NULL) {
		/* By the time we're here, accel buffers should have been allocated */
		assert(task->src_domain != g_accel_domain);

		if (!task->has_aux) {
			task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
			if (spdk_unlikely(!task->aux)) {
				SPDK_ERRLOG("Can't allocate aux data structure\n");
				assert(0);
				return -EAGAIN;
			}
			task->has_aux = true;
			SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
		}
		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->s.iovs, task->s.iovcnt));
		if (buf == NULL) {
			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
			return -ENOMEM;
		}

		SLIST_INSERT_HEAD(&seq->bounce_bufs, buf, link);
		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_src_bounce_cb)) {
			return -EAGAIN;
		}

		accel_set_bounce_buffer(&task->aux->bounce.s, &task->s.iovs, &task->s.iovcnt,
					&task->src_domain, &task->src_domain_ctx, buf);
	}

	if (task->dst_domain != NULL) {
		/* By the time we're here, accel buffers should have been allocated */
		assert(task->dst_domain != g_accel_domain);

		if (!task->has_aux) {
			task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
			if (spdk_unlikely(!task->aux)) {
				SPDK_ERRLOG("Can't allocate aux data structure\n");
				assert(0);
				return -EAGAIN;
			}
			task->has_aux = true;
			SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
		}
		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->d.iovs, task->d.iovcnt));
		if (buf == NULL) {
			/* The src buffer will be released when a sequence is completed */
			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
			return -ENOMEM;
		}

		SLIST_INSERT_HEAD(&seq->bounce_bufs, buf, link);
		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_dst_bounce_cb)) {
			return -EAGAIN;
		}

		accel_set_bounce_buffer(&task->aux->bounce.d, &task->d.iovs, &task->d.iovcnt,
					&task->dst_domain, &task->dst_domain_ctx, buf);
	}

	return 0;
}

static void
accel_task_pull_data_cb(void *ctx, int status)
{
	struct spdk_accel_sequence *seq = ctx;

	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
	if (spdk_likely(status == 0)) {
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
	} else {
		accel_sequence_set_fail(seq, status);
	}

	accel_process_sequence(seq);
}
static void
accel_task_pull_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	int rc;

	assert(task->has_aux);
	assert(task->aux);
	assert(task->aux->bounce.s.orig_iovs != NULL);
	assert(task->aux->bounce.s.orig_domain != NULL);
	assert(task->aux->bounce.s.orig_domain != g_accel_domain);
	assert(!g_modules_opc[task->op_code].supports_memory_domains);

	rc = spdk_memory_domain_pull_data(task->aux->bounce.s.orig_domain,
					  task->aux->bounce.s.orig_domain_ctx,
					  task->aux->bounce.s.orig_iovs, task->aux->bounce.s.orig_iovcnt,
					  task->s.iovs, task->s.iovcnt,
					  accel_task_pull_data_cb, seq);
	if (spdk_unlikely(rc != 0)) {
		SPDK_ERRLOG("Failed to pull data from memory domain: %s, rc: %d\n",
			    spdk_memory_domain_get_dma_device_id(task->aux->bounce.s.orig_domain), rc);
		accel_sequence_set_fail(seq, rc);
	}
}

static void
accel_task_push_data_cb(void *ctx, int status)
{
	struct spdk_accel_sequence *seq = ctx;

	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
	if (spdk_likely(status == 0)) {
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
	} else {
		accel_sequence_set_fail(seq, status);
	}

	accel_process_sequence(seq);
}

static void
accel_task_push_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	int rc;

	assert(task->has_aux);
	assert(task->aux);
	assert(task->aux->bounce.d.orig_iovs != NULL);
	assert(task->aux->bounce.d.orig_domain != NULL);
	assert(task->aux->bounce.d.orig_domain != g_accel_domain);
	assert(!g_modules_opc[task->op_code].supports_memory_domains);

	rc = spdk_memory_domain_push_data(task->aux->bounce.d.orig_domain,
					  task->aux->bounce.d.orig_domain_ctx,
					  task->aux->bounce.d.orig_iovs, task->aux->bounce.d.orig_iovcnt,
					  task->d.iovs, task->d.iovcnt,
					  accel_task_push_data_cb, seq);
	if (spdk_unlikely(rc != 0)) {
		SPDK_ERRLOG("Failed to push data to memory domain: %s, rc: %d\n",
			    spdk_memory_domain_get_dma_device_id(task->aux->bounce.d.orig_domain), rc);
		accel_sequence_set_fail(seq, rc);
	}
}
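/*
 * State machine overview (editor's summary of the switch below): each task
 * in a sequence typically walks
 *
 *	INIT -> CHECK_VIRTBUF -> CHECK_BOUNCEBUF [-> PULL_DATA] -> EXEC_TASK
 *	     -> AWAIT_TASK -> COMPLETE_TASK [-> PUSH_DATA] -> NEXT_TASK
 *
 * where the AWAIT_* states park the sequence until an async buffer
 * allocation, memory-domain transfer, or module completion resumes it. If a
 * platform driver is registered, INIT routes to the DRIVER_* states instead.
 */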
-EAGAIN) { 1850 break; 1851 } 1852 accel_sequence_set_fail(seq, rc); 1853 break; 1854 } 1855 if (task->has_aux && task->s.iovs == &task->aux->bounce.s.iov) { 1856 assert(task->aux->bounce.s.orig_iovs); 1857 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PULL_DATA); 1858 break; 1859 } 1860 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK); 1861 /* Fall through */ 1862 case ACCEL_SEQUENCE_STATE_EXEC_TASK: 1863 SPDK_DEBUGLOG(accel, "Executing %s operation, sequence: %p\n", 1864 g_opcode_strings[task->op_code], seq); 1865 1866 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_TASK); 1867 rc = accel_submit_task(accel_ch, task); 1868 if (spdk_unlikely(rc != 0)) { 1869 SPDK_ERRLOG("Failed to submit %s operation, sequence: %p\n", 1870 g_opcode_strings[task->op_code], seq); 1871 accel_sequence_set_fail(seq, rc); 1872 } 1873 break; 1874 case ACCEL_SEQUENCE_STATE_PULL_DATA: 1875 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA); 1876 accel_task_pull_data(seq, task); 1877 break; 1878 case ACCEL_SEQUENCE_STATE_COMPLETE_TASK: 1879 if (task->has_aux && task->d.iovs == &task->aux->bounce.d.iov) { 1880 assert(task->aux->bounce.d.orig_iovs); 1881 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PUSH_DATA); 1882 break; 1883 } 1884 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK); 1885 break; 1886 case ACCEL_SEQUENCE_STATE_PUSH_DATA: 1887 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA); 1888 accel_task_push_data(seq, task); 1889 break; 1890 case ACCEL_SEQUENCE_STATE_NEXT_TASK: 1891 accel_sequence_complete_task(seq, task); 1892 /* Check if there are any remaining tasks */ 1893 task = TAILQ_FIRST(&seq->tasks); 1894 if (task == NULL) { 1895 /* Immediately return here to make sure we don't touch the sequence 1896 * after it's completed */ 1897 accel_sequence_complete(seq); 1898 return; 1899 } 1900 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_INIT); 1901 break; 1902 case ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS: 1903 assert(!TAILQ_EMPTY(&seq->tasks)); 1904 1905 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS); 1906 rc = g_accel_driver->execute_sequence(accel_ch->driver_channel, seq); 1907 if (spdk_unlikely(rc != 0)) { 1908 SPDK_ERRLOG("Failed to execute sequence: %p using driver: %s\n", 1909 seq, g_accel_driver->name); 1910 accel_sequence_set_fail(seq, rc); 1911 } 1912 break; 1913 case ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS: 1914 /* Get the task again, as the driver might have completed some tasks 1915 * synchronously */ 1916 task = TAILQ_FIRST(&seq->tasks); 1917 if (task == NULL) { 1918 /* Immediately return here to make sure we don't touch the sequence 1919 * after it's completed */ 1920 accel_sequence_complete(seq); 1921 return; 1922 } 1923 /* We don't want to execute the next task through the driver, so we 1924 * explicitly omit the INIT state here */ 1925 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF); 1926 break; 1927 case ACCEL_SEQUENCE_STATE_ERROR: 1928 /* Immediately return here to make sure we don't touch the sequence 1929 * after it's completed */ 1930 assert(seq->status != 0); 1931 accel_sequence_complete(seq); 1932 return; 1933 case ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF: 1934 case ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF: 1935 case ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA: 1936 case ACCEL_SEQUENCE_STATE_AWAIT_TASK: 1937 case ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA: 1938 case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS: 1939 break; 1940 default: 1941 assert(0 && "bad state"); 1942 break; 1943 } 
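/* Each AWAIT_* case leaves seq->state unchanged, which ends this loop; processing
 * resumes when the corresponding completion callback calls accel_process_sequence() again. */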
1944 } while (seq->state != state);
1945
1946 seq->in_process_sequence = false;
1947 }
1948
1949 static void
1950 accel_sequence_task_cb(void *cb_arg, int status)
1951 {
1952 struct spdk_accel_sequence *seq = cb_arg;
1953 struct spdk_accel_task *task = TAILQ_FIRST(&seq->tasks);
1954
1955 switch (seq->state) {
1956 case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
1957 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_COMPLETE_TASK);
1958 if (spdk_unlikely(status != 0)) {
1959 SPDK_ERRLOG("Failed to execute %s operation, sequence: %p\n",
1960 g_opcode_strings[task->op_code], seq);
1961 accel_sequence_set_fail(seq, status);
1962 }
1963
1964 accel_process_sequence(seq);
1965 break;
1966 case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS:
1967 assert(g_accel_driver != NULL);
1968 /* Immediately remove the task from the outstanding list to make sure the next call
1969 * to spdk_accel_sequence_first_task() doesn't return it */
1970 accel_sequence_complete_task(seq, task);
1971 if (spdk_unlikely(status != 0)) {
1972 SPDK_ERRLOG("Failed to execute %s operation, sequence: %p through "
1973 "driver: %s\n", g_opcode_strings[task->op_code], seq,
1974 g_accel_driver->name);
1975 /* Update status without using accel_sequence_set_fail() to avoid changing
1976 * seq's state to ERROR until the driver calls spdk_accel_sequence_continue() */
1977 seq->status = status;
1978 }
1979 break;
1980 default:
1981 assert(0 && "bad state");
1982 break;
1983 }
1984 }
1985
1986 void
1987 spdk_accel_sequence_continue(struct spdk_accel_sequence *seq)
1988 {
1989 assert(g_accel_driver != NULL);
1990 assert(seq->state == ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS);
1991
1992 if (spdk_likely(seq->status == 0)) {
1993 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS);
1994 } else {
1995 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
1996 }
1997
1998 accel_process_sequence(seq);
1999 }
2000
2001 static bool
2002 accel_compare_iovs(struct iovec *iova, uint32_t iovacnt, struct iovec *iovb, uint32_t iovbcnt)
2003 {
2004 /* For now, just do a dumb check that the iovec arrays are exactly the same */
2005 if (iovacnt != iovbcnt) {
2006 return false;
2007 }
2008
2009 return memcmp(iova, iovb, sizeof(*iova) * iovacnt) == 0;
2010 }
2011
2012 static bool
2013 accel_task_set_dstbuf(struct spdk_accel_task *task, struct spdk_accel_task *next)
2014 {
2015 struct spdk_accel_task *prev;
2016
2017 switch (task->op_code) {
2018 case SPDK_ACCEL_OPC_DECOMPRESS:
2019 case SPDK_ACCEL_OPC_FILL:
2020 case SPDK_ACCEL_OPC_ENCRYPT:
2021 case SPDK_ACCEL_OPC_DECRYPT:
2022 if (task->dst_domain != next->src_domain) {
2023 return false;
2024 }
2025 if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
2026 next->s.iovs, next->s.iovcnt)) {
2027 return false;
2028 }
2029 task->d.iovs = next->d.iovs;
2030 task->d.iovcnt = next->d.iovcnt;
2031 task->dst_domain = next->dst_domain;
2032 task->dst_domain_ctx = next->dst_domain_ctx;
2033 break;
2034 case SPDK_ACCEL_OPC_CRC32C:
2035 /* crc32 is special, because it doesn't have a dst buffer */
2036 if (task->src_domain != next->src_domain) {
2037 return false;
2038 }
2039 if (!accel_compare_iovs(task->s.iovs, task->s.iovcnt,
2040 next->s.iovs, next->s.iovcnt)) {
2041 return false;
2042 }
2043 /* We can only change crc32's buffer if we can change the previous task's buffer */
2044 prev = TAILQ_PREV(task, accel_sequence_tasks, seq_link);
2045 if (prev == NULL) {
2046 return false;
2047 }
2048 if (!accel_task_set_dstbuf(prev, next)) {
2049 return false;
2050 }
2051 task->s.iovs = next->d.iovs;
2052 task->s.iovcnt = next->d.iovcnt;
2053 task->src_domain = next->dst_domain;
2054 task->src_domain_ctx = next->dst_domain_ctx;
2055 break;
2056 default:
2057 return false;
2058 }
2059
2060 return true;
2061 }
2062
2063 static void
2064 accel_sequence_merge_tasks(struct spdk_accel_sequence *seq, struct spdk_accel_task *task,
2065 struct spdk_accel_task **next_task)
2066 {
2067 struct spdk_accel_task *next = *next_task;
2068
2069 switch (task->op_code) {
2070 case SPDK_ACCEL_OPC_COPY:
2071 /* We only allow changing the src of operations that actually have a src, e.g. we never
2072 * do it for fill. Theoretically, it is possible, but we'd have to be careful to
2073 * change the src of the operation after fill (which in turn could also be a fill).
2074 * So, for the sake of simplicity, skip this type of operation for now.
2075 */
2076 if (next->op_code != SPDK_ACCEL_OPC_DECOMPRESS &&
2077 next->op_code != SPDK_ACCEL_OPC_COPY &&
2078 next->op_code != SPDK_ACCEL_OPC_ENCRYPT &&
2079 next->op_code != SPDK_ACCEL_OPC_DECRYPT &&
2080 next->op_code != SPDK_ACCEL_OPC_COPY_CRC32C) {
2081 break;
2082 }
2083 if (task->dst_domain != next->src_domain) {
2084 break;
2085 }
2086 if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
2087 next->s.iovs, next->s.iovcnt)) {
2088 break;
2089 }
2090 next->s.iovs = task->s.iovs;
2091 next->s.iovcnt = task->s.iovcnt;
2092 next->src_domain = task->src_domain;
2093 next->src_domain_ctx = task->src_domain_ctx;
2094 accel_sequence_complete_task(seq, task);
2095 break;
2096 case SPDK_ACCEL_OPC_DECOMPRESS:
2097 case SPDK_ACCEL_OPC_FILL:
2098 case SPDK_ACCEL_OPC_ENCRYPT:
2099 case SPDK_ACCEL_OPC_DECRYPT:
2100 case SPDK_ACCEL_OPC_CRC32C:
2101 /* We can only merge tasks when one of them is a copy */
2102 if (next->op_code != SPDK_ACCEL_OPC_COPY) {
2103 break;
2104 }
2105 if (!accel_task_set_dstbuf(task, next)) {
2106 break;
2107 }
2108 /* We're removing next_task from the tasks queue, so we need to update its pointer
2109 * so that the TAILQ_FOREACH_SAFE() loop below works correctly */
2110 *next_task = TAILQ_NEXT(next, seq_link);
2111 accel_sequence_complete_task(seq, next);
2112 break;
2113 default:
2114 assert(0 && "bad opcode");
2115 break;
2116 }
2117 }
2118
2119 void
2120 spdk_accel_sequence_finish(struct spdk_accel_sequence *seq,
2121 spdk_accel_completion_cb cb_fn, void *cb_arg)
2122 {
2123 struct spdk_accel_task *task, *next;
2124
2125 /* Remove any copy operations, where possible, by merging them with adjacent tasks */
2126 TAILQ_FOREACH_SAFE(task, &seq->tasks, seq_link, next) {
2127 if (next == NULL) {
2128 break;
2129 }
2130 accel_sequence_merge_tasks(seq, task, &next);
2131 }
2132
2133 seq->cb_fn = cb_fn;
2134 seq->cb_arg = cb_arg;
2135
2136 accel_process_sequence(seq);
2137 }
2138
2139 void
2140 spdk_accel_sequence_reverse(struct spdk_accel_sequence *seq)
2141 {
2142 struct accel_sequence_tasks tasks = TAILQ_HEAD_INITIALIZER(tasks);
2143 struct spdk_accel_task *task;
2144
2145 TAILQ_SWAP(&tasks, &seq->tasks, spdk_accel_task, seq_link);
2146
2147 while (!TAILQ_EMPTY(&tasks)) {
2148 task = TAILQ_FIRST(&tasks);
2149 TAILQ_REMOVE(&tasks, task, seq_link);
2150 TAILQ_INSERT_HEAD(&seq->tasks, task, seq_link);
2151 }
2152 }
2153
2154 void
2155 spdk_accel_sequence_abort(struct spdk_accel_sequence *seq)
2156 {
2157 if (seq == NULL) {
2158 return;
2159 }
2160
2161 accel_sequence_complete_tasks(seq);
2162 accel_sequence_put(seq);
2163 }
2164
2165 struct spdk_memory_domain *
2166 spdk_accel_get_memory_domain(void)
2167 {
2168 return g_accel_domain;
2169 }
2170
2171 static struct spdk_accel_module_if *
2172
_module_find_by_name(const char *name) 2173 { 2174 struct spdk_accel_module_if *accel_module = NULL; 2175 2176 TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) { 2177 if (strcmp(name, accel_module->name) == 0) { 2178 break; 2179 } 2180 } 2181 2182 return accel_module; 2183 } 2184 2185 static inline struct spdk_accel_crypto_key * 2186 _accel_crypto_key_get(const char *name) 2187 { 2188 struct spdk_accel_crypto_key *key; 2189 2190 assert(spdk_spin_held(&g_keyring_spin)); 2191 2192 TAILQ_FOREACH(key, &g_keyring, link) { 2193 if (strcmp(name, key->param.key_name) == 0) { 2194 return key; 2195 } 2196 } 2197 2198 return NULL; 2199 } 2200 2201 static void 2202 accel_crypto_key_free_mem(struct spdk_accel_crypto_key *key) 2203 { 2204 if (key->param.hex_key) { 2205 spdk_memset_s(key->param.hex_key, key->key_size * 2, 0, key->key_size * 2); 2206 free(key->param.hex_key); 2207 } 2208 if (key->param.hex_key2) { 2209 spdk_memset_s(key->param.hex_key2, key->key2_size * 2, 0, key->key2_size * 2); 2210 free(key->param.hex_key2); 2211 } 2212 free(key->param.tweak_mode); 2213 free(key->param.key_name); 2214 free(key->param.cipher); 2215 if (key->key) { 2216 spdk_memset_s(key->key, key->key_size, 0, key->key_size); 2217 free(key->key); 2218 } 2219 if (key->key2) { 2220 spdk_memset_s(key->key2, key->key2_size, 0, key->key2_size); 2221 free(key->key2); 2222 } 2223 free(key); 2224 } 2225 2226 static void 2227 accel_crypto_key_destroy_unsafe(struct spdk_accel_crypto_key *key) 2228 { 2229 assert(key->module_if); 2230 assert(key->module_if->crypto_key_deinit); 2231 2232 key->module_if->crypto_key_deinit(key); 2233 accel_crypto_key_free_mem(key); 2234 } 2235 2236 /* 2237 * This function mitigates a timing side channel which could be caused by using strcmp() 2238 * Please refer to chapter "Mitigating Information Leakage Based on Variable Timing" in 2239 * the article [1] for more details 2240 * [1] https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/secure-coding/mitigate-timing-side-channel-crypto-implementation.html 2241 */ 2242 static bool 2243 accel_aes_xts_keys_equal(const char *k1, size_t k1_len, const char *k2, size_t k2_len) 2244 { 2245 size_t i; 2246 volatile size_t x = k1_len ^ k2_len; 2247 2248 for (i = 0; ((i < k1_len) & (i < k2_len)); i++) { 2249 x |= k1[i] ^ k2[i]; 2250 } 2251 2252 return x == 0; 2253 } 2254 2255 static const char *g_tweak_modes[] = { 2256 [SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA] = "SIMPLE_LBA", 2257 [SPDK_ACCEL_CRYPTO_TWEAK_MODE_JOIN_NEG_LBA_WITH_LBA] = "JOIN_NEG_LBA_WITH_LBA", 2258 [SPDK_ACCEL_CRYPTO_TWEAK_MODE_INCR_512_FULL_LBA] = "INCR_512_FULL_LBA", 2259 [SPDK_ACCEL_CRYPTO_TWEAK_MODE_INCR_512_UPPER_LBA] = "INCR_512_UPPER_LBA", 2260 }; 2261 2262 static const char *g_ciphers[] = { 2263 [SPDK_ACCEL_CIPHER_AES_CBC] = "AES_CBC", 2264 [SPDK_ACCEL_CIPHER_AES_XTS] = "AES_XTS", 2265 }; 2266 2267 int 2268 spdk_accel_crypto_key_create(const struct spdk_accel_crypto_key_create_param *param) 2269 { 2270 struct spdk_accel_module_if *module; 2271 struct spdk_accel_crypto_key *key; 2272 size_t hex_key_size, hex_key2_size; 2273 bool found = false; 2274 size_t i; 2275 int rc; 2276 2277 if (!param || !param->hex_key || !param->cipher || !param->key_name) { 2278 return -EINVAL; 2279 } 2280 2281 if (g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module != g_modules_opc[SPDK_ACCEL_OPC_DECRYPT].module) { 2282 /* hardly ever possible, but let's check and warn the user */ 2283 SPDK_ERRLOG("Different accel modules are used for encryption and decryption\n"); 
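/* Continue anyway; the key is created through the module assigned to
 * SPDK_ACCEL_OPC_ENCRYPT below. */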
2284 } 2285 module = g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module; 2286 2287 if (!module) { 2288 SPDK_ERRLOG("No accel module found assigned for crypto operation\n"); 2289 return -ENOENT; 2290 } 2291 2292 if (!module->crypto_key_init || !module->crypto_supports_cipher) { 2293 SPDK_ERRLOG("Module %s doesn't support crypto operations\n", module->name); 2294 return -ENOTSUP; 2295 } 2296 2297 key = calloc(1, sizeof(*key)); 2298 if (!key) { 2299 return -ENOMEM; 2300 } 2301 2302 key->param.key_name = strdup(param->key_name); 2303 if (!key->param.key_name) { 2304 rc = -ENOMEM; 2305 goto error; 2306 } 2307 2308 for (i = 0; i < SPDK_COUNTOF(g_ciphers); ++i) { 2309 assert(g_ciphers[i]); 2310 2311 if (strncmp(param->cipher, g_ciphers[i], strlen(g_ciphers[i])) == 0) { 2312 key->cipher = i; 2313 found = true; 2314 break; 2315 } 2316 } 2317 2318 if (!found) { 2319 SPDK_ERRLOG("Failed to parse cipher\n"); 2320 rc = -EINVAL; 2321 goto error; 2322 } 2323 2324 key->param.cipher = strdup(param->cipher); 2325 if (!key->param.cipher) { 2326 rc = -ENOMEM; 2327 goto error; 2328 } 2329 2330 hex_key_size = strnlen(param->hex_key, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH); 2331 if (hex_key_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) { 2332 SPDK_ERRLOG("key1 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH); 2333 rc = -EINVAL; 2334 goto error; 2335 } 2336 2337 if (hex_key_size == 0) { 2338 SPDK_ERRLOG("key1 size cannot be 0\n"); 2339 rc = -EINVAL; 2340 goto error; 2341 } 2342 2343 key->param.hex_key = strdup(param->hex_key); 2344 if (!key->param.hex_key) { 2345 rc = -ENOMEM; 2346 goto error; 2347 } 2348 2349 key->key_size = hex_key_size / 2; 2350 key->key = spdk_unhexlify(key->param.hex_key); 2351 if (!key->key) { 2352 SPDK_ERRLOG("Failed to unhexlify key1\n"); 2353 rc = -EINVAL; 2354 goto error; 2355 } 2356 2357 if (param->hex_key2) { 2358 hex_key2_size = strnlen(param->hex_key2, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH); 2359 if (hex_key2_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) { 2360 SPDK_ERRLOG("key2 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH); 2361 rc = -EINVAL; 2362 goto error; 2363 } 2364 2365 if (hex_key2_size == 0) { 2366 SPDK_ERRLOG("key2 size cannot be 0\n"); 2367 rc = -EINVAL; 2368 goto error; 2369 } 2370 2371 key->param.hex_key2 = strdup(param->hex_key2); 2372 if (!key->param.hex_key2) { 2373 rc = -ENOMEM; 2374 goto error; 2375 } 2376 2377 key->key2_size = hex_key2_size / 2; 2378 key->key2 = spdk_unhexlify(key->param.hex_key2); 2379 if (!key->key2) { 2380 SPDK_ERRLOG("Failed to unhexlify key2\n"); 2381 rc = -EINVAL; 2382 goto error; 2383 } 2384 } 2385 2386 key->tweak_mode = ACCEL_CRYPTO_TWEAK_MODE_DEFAULT; 2387 if (param->tweak_mode) { 2388 found = false; 2389 2390 key->param.tweak_mode = strdup(param->tweak_mode); 2391 if (!key->param.tweak_mode) { 2392 rc = -ENOMEM; 2393 goto error; 2394 } 2395 2396 for (i = 0; i < SPDK_COUNTOF(g_tweak_modes); ++i) { 2397 assert(g_tweak_modes[i]); 2398 2399 if (strncmp(param->tweak_mode, g_tweak_modes[i], strlen(g_tweak_modes[i])) == 0) { 2400 key->tweak_mode = i; 2401 found = true; 2402 break; 2403 } 2404 } 2405 2406 if (!found) { 2407 SPDK_ERRLOG("Failed to parse tweak mode\n"); 2408 rc = -EINVAL; 2409 goto error; 2410 } 2411 } 2412 2413 if ((!module->crypto_supports_tweak_mode && key->tweak_mode != ACCEL_CRYPTO_TWEAK_MODE_DEFAULT) || 2414 (module->crypto_supports_tweak_mode && !module->crypto_supports_tweak_mode(key->tweak_mode))) { 2415 SPDK_ERRLOG("Module %s doesn't support %s tweak mode\n", module->name, 2416 
g_tweak_modes[key->tweak_mode]); 2417 rc = -EINVAL; 2418 goto error; 2419 } 2420 2421 if (!module->crypto_supports_cipher(key->cipher, key->key_size)) { 2422 SPDK_ERRLOG("Module %s doesn't support %s cipher with %zu key size\n", module->name, 2423 g_ciphers[key->cipher], key->key_size); 2424 rc = -EINVAL; 2425 goto error; 2426 } 2427 2428 if (key->cipher == SPDK_ACCEL_CIPHER_AES_XTS) { 2429 if (!key->key2) { 2430 SPDK_ERRLOG("%s key2 is missing\n", g_ciphers[key->cipher]); 2431 rc = -EINVAL; 2432 goto error; 2433 } 2434 2435 if (key->key_size != key->key2_size) { 2436 SPDK_ERRLOG("%s key size %zu is not equal to key2 size %zu\n", g_ciphers[key->cipher], 2437 key->key_size, 2438 key->key2_size); 2439 rc = -EINVAL; 2440 goto error; 2441 } 2442 2443 if (accel_aes_xts_keys_equal(key->key, key->key_size, key->key2, key->key2_size)) { 2444 SPDK_ERRLOG("%s identical keys are not secure\n", g_ciphers[key->cipher]); 2445 rc = -EINVAL; 2446 goto error; 2447 } 2448 } 2449 2450 if (key->cipher == SPDK_ACCEL_CIPHER_AES_CBC) { 2451 if (key->key2_size) { 2452 SPDK_ERRLOG("%s doesn't use key2\n", g_ciphers[key->cipher]); 2453 rc = -EINVAL; 2454 goto error; 2455 } 2456 } 2457 2458 key->module_if = module; 2459 2460 spdk_spin_lock(&g_keyring_spin); 2461 if (_accel_crypto_key_get(param->key_name)) { 2462 rc = -EEXIST; 2463 } else { 2464 rc = module->crypto_key_init(key); 2465 if (rc) { 2466 SPDK_ERRLOG("Module %s failed to initialize crypto key\n", module->name); 2467 } else { 2468 TAILQ_INSERT_TAIL(&g_keyring, key, link); 2469 } 2470 } 2471 spdk_spin_unlock(&g_keyring_spin); 2472 2473 if (rc) { 2474 goto error; 2475 } 2476 2477 return 0; 2478 2479 error: 2480 accel_crypto_key_free_mem(key); 2481 return rc; 2482 } 2483 2484 int 2485 spdk_accel_crypto_key_destroy(struct spdk_accel_crypto_key *key) 2486 { 2487 if (!key || !key->module_if) { 2488 return -EINVAL; 2489 } 2490 2491 spdk_spin_lock(&g_keyring_spin); 2492 if (!_accel_crypto_key_get(key->param.key_name)) { 2493 spdk_spin_unlock(&g_keyring_spin); 2494 return -ENOENT; 2495 } 2496 TAILQ_REMOVE(&g_keyring, key, link); 2497 spdk_spin_unlock(&g_keyring_spin); 2498 2499 accel_crypto_key_destroy_unsafe(key); 2500 2501 return 0; 2502 } 2503 2504 struct spdk_accel_crypto_key * 2505 spdk_accel_crypto_key_get(const char *name) 2506 { 2507 struct spdk_accel_crypto_key *key; 2508 2509 spdk_spin_lock(&g_keyring_spin); 2510 key = _accel_crypto_key_get(name); 2511 spdk_spin_unlock(&g_keyring_spin); 2512 2513 return key; 2514 } 2515 2516 /* Helper function when accel modules register with the framework. */ 2517 void 2518 spdk_accel_module_list_add(struct spdk_accel_module_if *accel_module) 2519 { 2520 struct spdk_accel_module_if *tmp; 2521 2522 if (_module_find_by_name(accel_module->name)) { 2523 SPDK_NOTICELOG("Module %s already registered\n", accel_module->name); 2524 assert(false); 2525 return; 2526 } 2527 2528 TAILQ_FOREACH(tmp, &spdk_accel_module_list, tailq) { 2529 if (accel_module->priority < tmp->priority) { 2530 break; 2531 } 2532 } 2533 2534 if (tmp != NULL) { 2535 TAILQ_INSERT_BEFORE(tmp, accel_module, tailq); 2536 } else { 2537 TAILQ_INSERT_TAIL(&spdk_accel_module_list, accel_module, tailq); 2538 } 2539 } 2540 2541 /* Framework level channel create callback. 
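 * Pre-allocates this channel's task, task-aux, sequence, and buffer pools, acquires an
 * IO channel from the module assigned to each opcode (and from the driver, if one is set),
 * and initializes the channel's iobuf cache.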
*/ 2542 static int 2543 accel_create_channel(void *io_device, void *ctx_buf) 2544 { 2545 struct accel_io_channel *accel_ch = ctx_buf; 2546 struct spdk_accel_task *accel_task; 2547 struct spdk_accel_task_aux_data *accel_task_aux; 2548 struct spdk_accel_sequence *seq; 2549 struct accel_buffer *buf; 2550 size_t task_size_aligned; 2551 uint8_t *task_mem; 2552 uint32_t i = 0, j; 2553 int rc; 2554 2555 task_size_aligned = SPDK_ALIGN_CEIL(g_max_accel_module_size, SPDK_CACHE_LINE_SIZE); 2556 accel_ch->task_pool_base = aligned_alloc(SPDK_CACHE_LINE_SIZE, 2557 g_opts.task_count * task_size_aligned); 2558 if (!accel_ch->task_pool_base) { 2559 return -ENOMEM; 2560 } 2561 memset(accel_ch->task_pool_base, 0, g_opts.task_count * task_size_aligned); 2562 2563 accel_ch->seq_pool_base = aligned_alloc(SPDK_CACHE_LINE_SIZE, 2564 g_opts.sequence_count * sizeof(struct spdk_accel_sequence)); 2565 if (accel_ch->seq_pool_base == NULL) { 2566 goto err; 2567 } 2568 memset(accel_ch->seq_pool_base, 0, g_opts.sequence_count * sizeof(struct spdk_accel_sequence)); 2569 2570 accel_ch->task_aux_data_base = calloc(g_opts.task_count, sizeof(struct spdk_accel_task_aux_data)); 2571 if (accel_ch->task_aux_data_base == NULL) { 2572 goto err; 2573 } 2574 2575 accel_ch->buf_pool_base = calloc(g_opts.buf_count, sizeof(struct accel_buffer)); 2576 if (accel_ch->buf_pool_base == NULL) { 2577 goto err; 2578 } 2579 2580 STAILQ_INIT(&accel_ch->task_pool); 2581 SLIST_INIT(&accel_ch->task_aux_data_pool); 2582 SLIST_INIT(&accel_ch->seq_pool); 2583 SLIST_INIT(&accel_ch->buf_pool); 2584 2585 task_mem = accel_ch->task_pool_base; 2586 for (i = 0; i < g_opts.task_count; i++) { 2587 accel_task = (struct spdk_accel_task *)task_mem; 2588 accel_task->aux = NULL; 2589 STAILQ_INSERT_TAIL(&accel_ch->task_pool, accel_task, link); 2590 task_mem += task_size_aligned; 2591 accel_task_aux = &accel_ch->task_aux_data_base[i]; 2592 SLIST_INSERT_HEAD(&accel_ch->task_aux_data_pool, accel_task_aux, link); 2593 } 2594 for (i = 0; i < g_opts.sequence_count; i++) { 2595 seq = &accel_ch->seq_pool_base[i]; 2596 SLIST_INSERT_HEAD(&accel_ch->seq_pool, seq, link); 2597 } 2598 for (i = 0; i < g_opts.buf_count; i++) { 2599 buf = &accel_ch->buf_pool_base[i]; 2600 SLIST_INSERT_HEAD(&accel_ch->buf_pool, buf, link); 2601 } 2602 2603 /* Assign modules and get IO channels for each */ 2604 for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) { 2605 accel_ch->module_ch[i] = g_modules_opc[i].module->get_io_channel(); 2606 /* This can happen if idxd runs out of channels. 
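 * Fail channel creation and release everything acquired so far in the error
 * path below.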
*/ 2607 if (accel_ch->module_ch[i] == NULL) { 2608 SPDK_ERRLOG("Module %s failed to get io channel\n", g_modules_opc[i].module->name); 2609 goto err; 2610 } 2611 } 2612 2613 if (g_accel_driver != NULL) { 2614 accel_ch->driver_channel = g_accel_driver->get_io_channel(); 2615 if (accel_ch->driver_channel == NULL) { 2616 SPDK_ERRLOG("Failed to get driver's IO channel\n"); 2617 goto err; 2618 } 2619 } 2620 2621 rc = spdk_iobuf_channel_init(&accel_ch->iobuf, "accel", g_opts.small_cache_size, 2622 g_opts.large_cache_size); 2623 if (rc != 0) { 2624 SPDK_ERRLOG("Failed to initialize iobuf accel channel\n"); 2625 goto err; 2626 } 2627 2628 return 0; 2629 err: 2630 if (accel_ch->driver_channel != NULL) { 2631 spdk_put_io_channel(accel_ch->driver_channel); 2632 } 2633 for (j = 0; j < i; j++) { 2634 spdk_put_io_channel(accel_ch->module_ch[j]); 2635 } 2636 free(accel_ch->task_pool_base); 2637 free(accel_ch->task_aux_data_base); 2638 free(accel_ch->seq_pool_base); 2639 free(accel_ch->buf_pool_base); 2640 2641 return -ENOMEM; 2642 } 2643 2644 static void 2645 accel_add_stats(struct accel_stats *total, struct accel_stats *stats) 2646 { 2647 int i; 2648 2649 total->sequence_executed += stats->sequence_executed; 2650 total->sequence_failed += stats->sequence_failed; 2651 total->sequence_outstanding += stats->sequence_outstanding; 2652 total->task_outstanding += stats->task_outstanding; 2653 total->retry.task += stats->retry.task; 2654 total->retry.sequence += stats->retry.sequence; 2655 total->retry.iobuf += stats->retry.iobuf; 2656 total->retry.bufdesc += stats->retry.bufdesc; 2657 for (i = 0; i < SPDK_ACCEL_OPC_LAST; ++i) { 2658 total->operations[i].executed += stats->operations[i].executed; 2659 total->operations[i].failed += stats->operations[i].failed; 2660 total->operations[i].num_bytes += stats->operations[i].num_bytes; 2661 } 2662 } 2663 2664 /* Framework level channel destroy callback. 
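 * Releases the iobuf, driver, and per-opcode module channels, folds this channel's
 * statistics into the global counters, and frees the pools allocated by
 * accel_create_channel().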
*/ 2665 static void 2666 accel_destroy_channel(void *io_device, void *ctx_buf) 2667 { 2668 struct accel_io_channel *accel_ch = ctx_buf; 2669 int i; 2670 2671 spdk_iobuf_channel_fini(&accel_ch->iobuf); 2672 2673 if (accel_ch->driver_channel != NULL) { 2674 spdk_put_io_channel(accel_ch->driver_channel); 2675 } 2676 2677 for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) { 2678 assert(accel_ch->module_ch[i] != NULL); 2679 spdk_put_io_channel(accel_ch->module_ch[i]); 2680 accel_ch->module_ch[i] = NULL; 2681 } 2682 2683 /* Update global stats to make sure channel's stats aren't lost after a channel is gone */ 2684 spdk_spin_lock(&g_stats_lock); 2685 accel_add_stats(&g_stats, &accel_ch->stats); 2686 spdk_spin_unlock(&g_stats_lock); 2687 2688 free(accel_ch->task_pool_base); 2689 free(accel_ch->task_aux_data_base); 2690 free(accel_ch->seq_pool_base); 2691 free(accel_ch->buf_pool_base); 2692 } 2693 2694 struct spdk_io_channel * 2695 spdk_accel_get_io_channel(void) 2696 { 2697 return spdk_get_io_channel(&spdk_accel_module_list); 2698 } 2699 2700 static int 2701 accel_module_initialize(void) 2702 { 2703 struct spdk_accel_module_if *accel_module, *tmp_module; 2704 int rc = 0, module_rc; 2705 2706 TAILQ_FOREACH_SAFE(accel_module, &spdk_accel_module_list, tailq, tmp_module) { 2707 module_rc = accel_module->module_init(); 2708 if (module_rc) { 2709 TAILQ_REMOVE(&spdk_accel_module_list, accel_module, tailq); 2710 if (module_rc == -ENODEV) { 2711 SPDK_NOTICELOG("No devices for module %s, skipping\n", accel_module->name); 2712 } else if (!rc) { 2713 SPDK_ERRLOG("Module %s initialization failed with %d\n", accel_module->name, module_rc); 2714 rc = module_rc; 2715 } 2716 continue; 2717 } 2718 2719 SPDK_DEBUGLOG(accel, "Module %s initialized.\n", accel_module->name); 2720 } 2721 2722 return rc; 2723 } 2724 2725 static void 2726 accel_module_init_opcode(enum spdk_accel_opcode opcode) 2727 { 2728 struct accel_module *module = &g_modules_opc[opcode]; 2729 struct spdk_accel_module_if *module_if = module->module; 2730 2731 if (module_if->get_memory_domains != NULL) { 2732 module->supports_memory_domains = module_if->get_memory_domains(NULL, 0) > 0; 2733 } 2734 } 2735 2736 static int 2737 accel_memory_domain_translate(struct spdk_memory_domain *src_domain, void *src_domain_ctx, 2738 struct spdk_memory_domain *dst_domain, struct spdk_memory_domain_translation_ctx *dst_domain_ctx, 2739 void *addr, size_t len, struct spdk_memory_domain_translation_result *result) 2740 { 2741 struct accel_buffer *buf = src_domain_ctx; 2742 2743 SPDK_DEBUGLOG(accel, "translate addr %p, len %zu\n", addr, len); 2744 2745 assert(g_accel_domain == src_domain); 2746 assert(spdk_memory_domain_get_system_domain() == dst_domain); 2747 assert(buf->buf == NULL); 2748 assert(addr == ACCEL_BUFFER_BASE); 2749 assert(len == buf->len); 2750 2751 buf->buf = spdk_iobuf_get(&buf->ch->iobuf, buf->len, NULL, NULL); 2752 if (spdk_unlikely(buf->buf == NULL)) { 2753 return -ENOMEM; 2754 } 2755 2756 result->iov_count = 1; 2757 result->iov.iov_base = buf->buf; 2758 result->iov.iov_len = buf->len; 2759 SPDK_DEBUGLOG(accel, "translated addr %p\n", result->iov.iov_base); 2760 return 0; 2761 } 2762 2763 static void 2764 accel_memory_domain_invalidate(struct spdk_memory_domain *domain, void *domain_ctx, 2765 struct iovec *iov, uint32_t iovcnt) 2766 { 2767 struct accel_buffer *buf = domain_ctx; 2768 2769 SPDK_DEBUGLOG(accel, "invalidate addr %p, len %zu\n", iov[0].iov_base, iov[0].iov_len); 2770 2771 assert(g_accel_domain == domain); 2772 assert(iovcnt == 1); 2773 
assert(buf->buf != NULL);
2774 assert(iov[0].iov_base == buf->buf);
2775 assert(iov[0].iov_len == buf->len);
2776
2777 spdk_iobuf_put(&buf->ch->iobuf, buf->buf, buf->len);
2778 buf->buf = NULL;
2779 }
2780
2781 int
2782 spdk_accel_initialize(void)
2783 {
2784 enum spdk_accel_opcode op;
2785 struct spdk_accel_module_if *accel_module = NULL;
2786 int rc;
2787
2788 /*
2789 * We need a unique identifier for the accel framework, so use the
2790 * spdk_accel_module_list address for this purpose.
2791 */
2792 spdk_io_device_register(&spdk_accel_module_list, accel_create_channel, accel_destroy_channel,
2793 sizeof(struct accel_io_channel), "accel");
2794
2795 spdk_spin_init(&g_keyring_spin);
2796 spdk_spin_init(&g_stats_lock);
2797
2798 rc = spdk_memory_domain_create(&g_accel_domain, SPDK_DMA_DEVICE_TYPE_ACCEL, NULL,
2799 "SPDK_ACCEL_DMA_DEVICE");
2800 if (rc != 0) {
2801 SPDK_ERRLOG("Failed to create accel memory domain\n");
2802 return rc;
2803 }
2804
2805 spdk_memory_domain_set_translation(g_accel_domain, accel_memory_domain_translate);
2806 spdk_memory_domain_set_invalidate(g_accel_domain, accel_memory_domain_invalidate);
2807
2808 g_modules_started = true;
2809 rc = accel_module_initialize();
2810 if (rc) {
2811 return rc;
2812 }
2813
2814 if (g_accel_driver != NULL && g_accel_driver->init != NULL) {
2815 rc = g_accel_driver->init();
2816 if (rc != 0) {
2817 SPDK_ERRLOG("Failed to initialize driver %s: %s\n", g_accel_driver->name,
2818 spdk_strerror(-rc));
2819 return rc;
2820 }
2821 }
2822
2823 /* The module list is ordered by priority, with the highest priority modules being at the end
2824 * of the list. The software module should be somewhere at the beginning of the list,
2825 * before all HW modules.
2826 * NOTE: all opcodes must be supported by software in the event that no HW modules are
2827 * initialized to support the operation.
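 * Since the loop below simply overwrites earlier assignments, the highest-priority
 * module that supports a given opcode is the one that ends up owning it.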
2828 */
2829 TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
2830 for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
2831 if (accel_module->supports_opcode(op)) {
2832 g_modules_opc[op].module = accel_module;
2833 SPDK_DEBUGLOG(accel, "OPC 0x%x now assigned to %s\n", op, accel_module->name);
2834 }
2835 }
2836
2837 if (accel_module->get_ctx_size != NULL) {
2838 g_max_accel_module_size = spdk_max(g_max_accel_module_size,
2839 accel_module->get_ctx_size());
2840 }
2841 }
2842
2843 /* Now let's check for overrides and apply any that exist */
2844 for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
2845 if (g_modules_opc_override[op] != NULL) {
2846 accel_module = _module_find_by_name(g_modules_opc_override[op]);
2847 if (accel_module == NULL) {
2848 SPDK_ERRLOG("Invalid module name: %s\n", g_modules_opc_override[op]);
2849 return -EINVAL;
2850 }
2851 if (accel_module->supports_opcode(op) == false) {
2852 SPDK_ERRLOG("Module %s does not support op code %d\n", accel_module->name, op);
2853 return -EINVAL;
2854 }
2855 g_modules_opc[op].module = accel_module;
2856 }
2857 }
2858
2859 if (g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module != g_modules_opc[SPDK_ACCEL_OPC_DECRYPT].module) {
2860 SPDK_ERRLOG("Different accel modules are assigned to encrypt and decrypt operations\n");
2861 return -EINVAL;
2862 }
2863
2864 for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
2865 assert(g_modules_opc[op].module != NULL);
2866 accel_module_init_opcode(op);
2867 }
2868
2869 rc = spdk_iobuf_register_module("accel");
2870 if (rc != 0) {
2871 SPDK_ERRLOG("Failed to register accel iobuf module\n");
2872 return rc;
2873 }
2874
2875 return 0;
2876 }
2877
2878 static void
2879 accel_module_finish_cb(void)
2880 {
2881 spdk_accel_fini_cb cb_fn = g_fini_cb_fn;
2882
2883 cb_fn(g_fini_cb_arg);
2884 g_fini_cb_fn = NULL;
2885 g_fini_cb_arg = NULL;
2886 }
2887
2888 static void
2889 accel_write_overridden_opc(struct spdk_json_write_ctx *w, const char *opc_str,
2890 const char *module_str)
2891 {
2892 spdk_json_write_object_begin(w);
2893 spdk_json_write_named_string(w, "method", "accel_assign_opc");
2894 spdk_json_write_named_object_begin(w, "params");
2895 spdk_json_write_named_string(w, "opname", opc_str);
2896 spdk_json_write_named_string(w, "module", module_str);
2897 spdk_json_write_object_end(w);
2898 spdk_json_write_object_end(w);
2899 }
2900
2901 static void
2902 __accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
2903 {
2904 spdk_json_write_named_string(w, "name", key->param.key_name);
2905 spdk_json_write_named_string(w, "cipher", key->param.cipher);
2906 spdk_json_write_named_string(w, "key", key->param.hex_key);
2907 if (key->param.hex_key2) {
2908 spdk_json_write_named_string(w, "key2", key->param.hex_key2);
2909 }
2910
2911 if (key->param.tweak_mode) {
2912 spdk_json_write_named_string(w, "tweak_mode", key->param.tweak_mode);
2913 }
2914 }
2915
2916 void
2917 _accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
2918 {
2919 spdk_json_write_object_begin(w);
2920 __accel_crypto_key_dump_param(w, key);
2921 spdk_json_write_object_end(w);
2922 }
2923
2924 static void
2925 _accel_crypto_key_write_config_json(struct spdk_json_write_ctx *w,
2926 struct spdk_accel_crypto_key *key)
2927 {
2928 spdk_json_write_object_begin(w);
2929 spdk_json_write_named_string(w, "method", "accel_crypto_key_create");
2930 spdk_json_write_named_object_begin(w, "params");
2931 __accel_crypto_key_dump_param(w, key);
2932 spdk_json_write_object_end(w);
2933
spdk_json_write_object_end(w); 2934 } 2935 2936 static void 2937 accel_write_options(struct spdk_json_write_ctx *w) 2938 { 2939 spdk_json_write_object_begin(w); 2940 spdk_json_write_named_string(w, "method", "accel_set_options"); 2941 spdk_json_write_named_object_begin(w, "params"); 2942 spdk_json_write_named_uint32(w, "small_cache_size", g_opts.small_cache_size); 2943 spdk_json_write_named_uint32(w, "large_cache_size", g_opts.large_cache_size); 2944 spdk_json_write_named_uint32(w, "task_count", g_opts.task_count); 2945 spdk_json_write_named_uint32(w, "sequence_count", g_opts.sequence_count); 2946 spdk_json_write_named_uint32(w, "buf_count", g_opts.buf_count); 2947 spdk_json_write_object_end(w); 2948 spdk_json_write_object_end(w); 2949 } 2950 2951 static void 2952 _accel_crypto_keys_write_config_json(struct spdk_json_write_ctx *w, bool full_dump) 2953 { 2954 struct spdk_accel_crypto_key *key; 2955 2956 spdk_spin_lock(&g_keyring_spin); 2957 TAILQ_FOREACH(key, &g_keyring, link) { 2958 if (full_dump) { 2959 _accel_crypto_key_write_config_json(w, key); 2960 } else { 2961 _accel_crypto_key_dump_param(w, key); 2962 } 2963 } 2964 spdk_spin_unlock(&g_keyring_spin); 2965 } 2966 2967 void 2968 _accel_crypto_keys_dump_param(struct spdk_json_write_ctx *w) 2969 { 2970 _accel_crypto_keys_write_config_json(w, false); 2971 } 2972 2973 void 2974 spdk_accel_write_config_json(struct spdk_json_write_ctx *w) 2975 { 2976 struct spdk_accel_module_if *accel_module; 2977 int i; 2978 2979 spdk_json_write_array_begin(w); 2980 accel_write_options(w); 2981 2982 TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) { 2983 if (accel_module->write_config_json) { 2984 accel_module->write_config_json(w); 2985 } 2986 } 2987 for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) { 2988 if (g_modules_opc_override[i]) { 2989 accel_write_overridden_opc(w, g_opcode_strings[i], g_modules_opc_override[i]); 2990 } 2991 } 2992 2993 _accel_crypto_keys_write_config_json(w, true); 2994 2995 spdk_json_write_array_end(w); 2996 } 2997 2998 void 2999 spdk_accel_module_finish(void) 3000 { 3001 if (!g_accel_module) { 3002 g_accel_module = TAILQ_FIRST(&spdk_accel_module_list); 3003 } else { 3004 g_accel_module = TAILQ_NEXT(g_accel_module, tailq); 3005 } 3006 3007 if (!g_accel_module) { 3008 if (g_accel_driver != NULL && g_accel_driver->fini != NULL) { 3009 g_accel_driver->fini(); 3010 } 3011 3012 spdk_spin_destroy(&g_keyring_spin); 3013 spdk_spin_destroy(&g_stats_lock); 3014 if (g_accel_domain) { 3015 spdk_memory_domain_destroy(g_accel_domain); 3016 g_accel_domain = NULL; 3017 } 3018 accel_module_finish_cb(); 3019 return; 3020 } 3021 3022 if (g_accel_module->module_fini) { 3023 spdk_thread_send_msg(spdk_get_thread(), g_accel_module->module_fini, NULL); 3024 } else { 3025 spdk_accel_module_finish(); 3026 } 3027 } 3028 3029 static void 3030 accel_io_device_unregister_cb(void *io_device) 3031 { 3032 struct spdk_accel_crypto_key *key, *key_tmp; 3033 enum spdk_accel_opcode op; 3034 3035 spdk_spin_lock(&g_keyring_spin); 3036 TAILQ_FOREACH_SAFE(key, &g_keyring, link, key_tmp) { 3037 accel_crypto_key_destroy_unsafe(key); 3038 } 3039 spdk_spin_unlock(&g_keyring_spin); 3040 3041 for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) { 3042 if (g_modules_opc_override[op] != NULL) { 3043 free(g_modules_opc_override[op]); 3044 g_modules_opc_override[op] = NULL; 3045 } 3046 g_modules_opc[op].module = NULL; 3047 } 3048 3049 spdk_accel_module_finish(); 3050 } 3051 3052 void 3053 spdk_accel_finish(spdk_accel_fini_cb cb_fn, void *cb_arg) 3054 { 3055 assert(cb_fn != NULL); 3056 
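/* Stash the callback; it's invoked from accel_module_finish_cb() once the io_device has
 * been unregistered and every module has finished. */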
3057 g_fini_cb_fn = cb_fn;
3058 g_fini_cb_arg = cb_arg;
3059
3060 spdk_io_device_unregister(&spdk_accel_module_list, accel_io_device_unregister_cb);
3061 }
3062
3063 static struct spdk_accel_driver *
3064 accel_find_driver(const char *name)
3065 {
3066 struct spdk_accel_driver *driver;
3067
3068 TAILQ_FOREACH(driver, &g_accel_drivers, tailq) {
3069 if (strcmp(driver->name, name) == 0) {
3070 return driver;
3071 }
3072 }
3073
3074 return NULL;
3075 }
3076
3077 int
3078 spdk_accel_set_driver(const char *name)
3079 {
3080 struct spdk_accel_driver *driver;
3081
3082 driver = accel_find_driver(name);
3083 if (driver == NULL) {
3084 SPDK_ERRLOG("Couldn't find driver named '%s'\n", name);
3085 return -ENODEV;
3086 }
3087
3088 g_accel_driver = driver;
3089
3090 return 0;
3091 }
3092
3093 const char *
3094 spdk_accel_get_driver_name(void)
3095 {
3096 if (!g_accel_driver) {
3097 return NULL;
3098 }
3099
3100 return g_accel_driver->name;
3101 }
3102
3103 void
3104 spdk_accel_driver_register(struct spdk_accel_driver *driver)
3105 {
3106 if (accel_find_driver(driver->name)) {
3107 SPDK_ERRLOG("Driver named '%s' has already been registered\n", driver->name);
3108 assert(0);
3109 return;
3110 }
3111
3112 TAILQ_INSERT_TAIL(&g_accel_drivers, driver, tailq);
3113 }
3114
3115 int
3116 spdk_accel_set_opts(const struct spdk_accel_opts *opts)
3117 {
3118 if (!opts) {
3119 SPDK_ERRLOG("opts cannot be NULL\n");
3120 return -1;
3121 }
3122
3123 if (!opts->opts_size) {
3124 SPDK_ERRLOG("opts_size inside opts cannot be zero\n");
3125 return -1;
3126 }
3127
3128 if (SPDK_GET_FIELD(opts, task_count, g_opts.task_count,
3129 opts->opts_size) < ACCEL_TASKS_IN_SEQUENCE_LIMIT) {
3130 return -EINVAL;
3131 }
3132
3133 #define SET_FIELD(field) \
3134 if (offsetof(struct spdk_accel_opts, field) + sizeof(opts->field) <= opts->opts_size) { \
3135 g_opts.field = opts->field; \
3136 } \
3137
3138 SET_FIELD(small_cache_size);
3139 SET_FIELD(large_cache_size);
3140 SET_FIELD(task_count);
3141 SET_FIELD(sequence_count);
3142 SET_FIELD(buf_count);
3143
3144 g_opts.opts_size = opts->opts_size;
3145
3146 #undef SET_FIELD
3147
3148 return 0;
3149 }
3150
3151 void
3152 spdk_accel_get_opts(struct spdk_accel_opts *opts, size_t opts_size)
3153 {
3154 if (!opts) {
3155 SPDK_ERRLOG("opts should not be NULL\n");
3156 return;
3157 }
3158
3159 if (!opts_size) {
3160 SPDK_ERRLOG("opts_size should not be zero\n");
3161 return;
3162 }
3163
3164 opts->opts_size = opts_size;
3165
3166 #define SET_FIELD(field) \
3167 if (offsetof(struct spdk_accel_opts, field) + sizeof(opts->field) <= opts_size) { \
3168 opts->field = g_opts.field; \
3169 } \
3170
3171 SET_FIELD(small_cache_size);
3172 SET_FIELD(large_cache_size);
3173 SET_FIELD(task_count);
3174 SET_FIELD(sequence_count);
3175 SET_FIELD(buf_count);
3176
3177 #undef SET_FIELD
3178
3179 /* Do not remove this statement; always update it when adding a new field,
3180 * and do not forget to add a SET_FIELD statement for the new field.
*/ 3181 SPDK_STATIC_ASSERT(sizeof(struct spdk_accel_opts) == 28, "Incorrect size"); 3182 } 3183 3184 struct accel_get_stats_ctx { 3185 struct accel_stats stats; 3186 accel_get_stats_cb cb_fn; 3187 void *cb_arg; 3188 }; 3189 3190 static void 3191 accel_get_channel_stats_done(struct spdk_io_channel_iter *iter, int status) 3192 { 3193 struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter); 3194 3195 ctx->cb_fn(&ctx->stats, ctx->cb_arg); 3196 free(ctx); 3197 } 3198 3199 static void 3200 accel_get_channel_stats(struct spdk_io_channel_iter *iter) 3201 { 3202 struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(iter); 3203 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 3204 struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter); 3205 3206 accel_add_stats(&ctx->stats, &accel_ch->stats); 3207 spdk_for_each_channel_continue(iter, 0); 3208 } 3209 3210 int 3211 accel_get_stats(accel_get_stats_cb cb_fn, void *cb_arg) 3212 { 3213 struct accel_get_stats_ctx *ctx; 3214 3215 ctx = calloc(1, sizeof(*ctx)); 3216 if (ctx == NULL) { 3217 return -ENOMEM; 3218 } 3219 3220 spdk_spin_lock(&g_stats_lock); 3221 accel_add_stats(&ctx->stats, &g_stats); 3222 spdk_spin_unlock(&g_stats_lock); 3223 3224 ctx->cb_fn = cb_fn; 3225 ctx->cb_arg = cb_arg; 3226 3227 spdk_for_each_channel(&spdk_accel_module_list, accel_get_channel_stats, ctx, 3228 accel_get_channel_stats_done); 3229 3230 return 0; 3231 } 3232 3233 void 3234 spdk_accel_get_opcode_stats(struct spdk_io_channel *ch, enum spdk_accel_opcode opcode, 3235 struct spdk_accel_opcode_stats *stats, size_t size) 3236 { 3237 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 3238 3239 #define FIELD_OK(field) \ 3240 offsetof(struct spdk_accel_opcode_stats, field) + sizeof(stats->field) <= size 3241 3242 #define SET_FIELD(field, value) \ 3243 if (FIELD_OK(field)) { \ 3244 stats->field = value; \ 3245 } 3246 3247 SET_FIELD(executed, accel_ch->stats.operations[opcode].executed); 3248 SET_FIELD(failed, accel_ch->stats.operations[opcode].failed); 3249 SET_FIELD(num_bytes, accel_ch->stats.operations[opcode].num_bytes); 3250 3251 #undef FIELD_OK 3252 #undef SET_FIELD 3253 } 3254 3255 uint8_t 3256 spdk_accel_get_buf_align(enum spdk_accel_opcode opcode, 3257 const struct spdk_accel_operation_exec_ctx *ctx) 3258 { 3259 struct spdk_accel_module_if *module = g_modules_opc[opcode].module; 3260 struct spdk_accel_opcode_info modinfo = {}, drvinfo = {}; 3261 3262 if (g_accel_driver != NULL && g_accel_driver->get_operation_info != NULL) { 3263 g_accel_driver->get_operation_info(opcode, ctx, &drvinfo); 3264 } 3265 3266 if (module->get_operation_info != NULL) { 3267 module->get_operation_info(opcode, ctx, &modinfo); 3268 } 3269 3270 /* If a driver is set, it'll execute most of the operations, while the rest will usually 3271 * fall back to accel_sw, which doesn't have any alignment requirements. However, to be 3272 * extra safe, return the max(driver, module) if a driver delegates some operations to a 3273 * hardware module. 
*/ 3274 return spdk_max(modinfo.required_alignment, drvinfo.required_alignment); 3275 } 3276 3277 struct spdk_accel_module_if * 3278 spdk_accel_get_module(const char *name) 3279 { 3280 struct spdk_accel_module_if *module; 3281 3282 TAILQ_FOREACH(module, &spdk_accel_module_list, tailq) { 3283 if (strcmp(module->name, name) == 0) { 3284 return module; 3285 } 3286 } 3287 3288 return NULL; 3289 } 3290 3291 int 3292 spdk_accel_get_opc_memory_domains(enum spdk_accel_opcode opcode, 3293 struct spdk_memory_domain **domains, 3294 int array_size) 3295 { 3296 assert(opcode < SPDK_ACCEL_OPC_LAST); 3297 3298 if (g_modules_opc[opcode].module->get_memory_domains) { 3299 return g_modules_opc[opcode].module->get_memory_domains(domains, array_size); 3300 } 3301 3302 return 0; 3303 } 3304 3305 SPDK_LOG_REGISTER_COMPONENT(accel) 3306
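
/*
 * Usage sketch (illustrative only, not compiled as part of the framework): registering
 * an AES_XTS key via spdk_accel_crypto_key_create(). The key material, key name and
 * tweak mode below are placeholders; whether a given cipher and key size is accepted
 * depends on the module assigned to the encrypt/decrypt opcodes.
 *
 *	struct spdk_accel_crypto_key_create_param param = {
 *		.cipher = "AES_XTS",
 *		.hex_key = "00112233445566778899aabbccddeeff",	// 32 hex chars -> 16-byte key
 *		.hex_key2 = "ffeeddccbbaa99887766554433221100",	// must differ from hex_key
 *		.tweak_mode = "SIMPLE_LBA",				// optional, this is the default
 *		.key_name = "key0",
 *	};
 *
 *	if (spdk_accel_crypto_key_create(&param) != 0) {
 *		// handle error: -EINVAL, -ENOMEM, -ENOENT, -ENOTSUP or -EEXIST
 *	}
 *
 * AES_XTS requires key2 with the same size as key; identical keys are rejected.
 */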