/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Intel Corporation.
 * Copyright (c) 2022, 2023 NVIDIA CORPORATION & AFFILIATES.
 * All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/accel_module.h"

#include "accel_internal.h"

#include "spdk/dma.h"
#include "spdk/env.h"
#include "spdk/likely.h"
#include "spdk/log.h"
#include "spdk/thread.h"
#include "spdk/json.h"
#include "spdk/crc32.h"
#include "spdk/util.h"
#include "spdk/hexlify.h"
#include "spdk/string.h"

/* Accelerator Framework: The following provides a top level
 * generic API for the accelerator functions defined here. Modules,
 * such as the one in /module/accel/ioat, supply the implementation
 * with the exception of the pure software implementation contained
 * later in this file.
 */

#define ALIGN_4K			0x1000
#define MAX_TASKS_PER_CHANNEL		0x800
#define ACCEL_SMALL_CACHE_SIZE		128
#define ACCEL_LARGE_CACHE_SIZE		16
/* Set MSB, so we don't return NULL pointers as buffers */
#define ACCEL_BUFFER_BASE		((void *)(1ull << 63))
#define ACCEL_BUFFER_OFFSET_MASK	((uintptr_t)ACCEL_BUFFER_BASE - 1)

#define ACCEL_CRYPTO_TWEAK_MODE_DEFAULT	SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA

struct accel_module {
	struct spdk_accel_module_if	*module;
	bool				supports_memory_domains;
};

/* Largest context size for all accel modules */
static size_t g_max_accel_module_size = sizeof(struct spdk_accel_task);

static struct spdk_accel_module_if *g_accel_module = NULL;
static spdk_accel_fini_cb g_fini_cb_fn = NULL;
static void *g_fini_cb_arg = NULL;
static bool g_modules_started = false;
static struct spdk_memory_domain *g_accel_domain;

/* Global list of registered accelerator modules */
static TAILQ_HEAD(, spdk_accel_module_if) spdk_accel_module_list =
	TAILQ_HEAD_INITIALIZER(spdk_accel_module_list);

/* Crypto keyring */
static TAILQ_HEAD(, spdk_accel_crypto_key) g_keyring = TAILQ_HEAD_INITIALIZER(g_keyring);
static struct spdk_spinlock g_keyring_spin;

/* Global array mapping capabilities to modules */
static struct accel_module g_modules_opc[SPDK_ACCEL_OPC_LAST] = {};
static char *g_modules_opc_override[SPDK_ACCEL_OPC_LAST] = {};
TAILQ_HEAD(, spdk_accel_driver) g_accel_drivers = TAILQ_HEAD_INITIALIZER(g_accel_drivers);
static struct spdk_accel_driver *g_accel_driver;
static struct spdk_accel_opts g_opts = {
	.small_cache_size = ACCEL_SMALL_CACHE_SIZE,
	.large_cache_size = ACCEL_LARGE_CACHE_SIZE,
	.task_count = MAX_TASKS_PER_CHANNEL,
	.sequence_count = MAX_TASKS_PER_CHANNEL,
	.buf_count = MAX_TASKS_PER_CHANNEL,
};
static struct accel_stats g_stats;
static struct spdk_spinlock g_stats_lock;

static const char *g_opcode_strings[SPDK_ACCEL_OPC_LAST] = {
	"copy", "fill", "dualcast", "compare", "crc32c", "copy_crc32c",
	"compress", "decompress", "encrypt", "decrypt", "xor"
};

enum accel_sequence_state {
	ACCEL_SEQUENCE_STATE_INIT,
	ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF,
	ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF,
	ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF,
	ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF,
	ACCEL_SEQUENCE_STATE_PULL_DATA,
	ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA,
	ACCEL_SEQUENCE_STATE_EXEC_TASK,
	ACCEL_SEQUENCE_STATE_AWAIT_TASK,
	ACCEL_SEQUENCE_STATE_COMPLETE_TASK,
	ACCEL_SEQUENCE_STATE_NEXT_TASK,
	ACCEL_SEQUENCE_STATE_PUSH_DATA,
	ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA,
	ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS,
	ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS,
	ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS,
	ACCEL_SEQUENCE_STATE_ERROR,
	ACCEL_SEQUENCE_STATE_MAX,
};
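/* Normal task flow through the state machine above (see accel_process_sequence()):
 * INIT -> CHECK_VIRTBUF -> CHECK_BOUNCEBUF -> [PULL_DATA ->] EXEC_TASK ->
 * AWAIT_TASK -> COMPLETE_TASK -> [PUSH_DATA ->] NEXT_TASK -> INIT (next task).
 * When a driver is registered, INIT transitions to DRIVER_EXEC_TASKS instead, and
 * the AWAIT_* states simply wait for an asynchronous callback to advance things.
 */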

static const char *g_seq_states[]
__attribute__((unused)) = {
	[ACCEL_SEQUENCE_STATE_INIT] = "init",
	[ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF] = "check-virtbuf",
	[ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF] = "await-virtbuf",
	[ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF] = "check-bouncebuf",
	[ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF] = "await-bouncebuf",
	[ACCEL_SEQUENCE_STATE_PULL_DATA] = "pull-data",
	[ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA] = "await-pull-data",
	[ACCEL_SEQUENCE_STATE_EXEC_TASK] = "exec-task",
	[ACCEL_SEQUENCE_STATE_AWAIT_TASK] = "await-task",
	[ACCEL_SEQUENCE_STATE_COMPLETE_TASK] = "complete-task",
	[ACCEL_SEQUENCE_STATE_NEXT_TASK] = "next-task",
	[ACCEL_SEQUENCE_STATE_PUSH_DATA] = "push-data",
	[ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA] = "await-push-data",
	[ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS] = "driver-exec-tasks",
	[ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS] = "driver-await-tasks",
	[ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS] = "driver-complete-tasks",
	[ACCEL_SEQUENCE_STATE_ERROR] = "error",
	[ACCEL_SEQUENCE_STATE_MAX] = "",
};

#define ACCEL_SEQUENCE_STATE_STRING(s) \
	(((s) >= ACCEL_SEQUENCE_STATE_INIT && (s) < ACCEL_SEQUENCE_STATE_MAX) \
	 ? g_seq_states[s] : "unknown")

struct accel_buffer {
	struct spdk_accel_sequence	*seq;
	void				*buf;
	uint64_t			len;
	struct spdk_iobuf_entry		iobuf;
	spdk_accel_sequence_get_buf_cb	cb_fn;
	void				*cb_ctx;
	SLIST_ENTRY(accel_buffer)	link;
};

struct accel_io_channel {
	struct spdk_io_channel			*module_ch[SPDK_ACCEL_OPC_LAST];
	struct spdk_io_channel			*driver_channel;
	void					*task_pool_base;
	struct spdk_accel_sequence		*seq_pool_base;
	struct accel_buffer			*buf_pool_base;
	TAILQ_HEAD(, spdk_accel_task)		task_pool;
	SLIST_HEAD(, spdk_accel_sequence)	seq_pool;
	SLIST_HEAD(, accel_buffer)		buf_pool;
	struct spdk_iobuf_channel		iobuf;
	struct accel_stats			stats;
};

TAILQ_HEAD(accel_sequence_tasks, spdk_accel_task);

struct spdk_accel_sequence {
	struct accel_io_channel			*ch;
	struct accel_sequence_tasks		tasks;
	SLIST_HEAD(, accel_buffer)		bounce_bufs;
	int					status;
	/* state uses enum accel_sequence_state */
	uint8_t					state;
	bool					in_process_sequence;
	spdk_accel_completion_cb		cb_fn;
	void					*cb_arg;
	SLIST_ENTRY(spdk_accel_sequence)	link;
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_accel_sequence) == 64, "invalid size");

#define accel_update_stats(ch, event, v) \
	do { \
		(ch)->stats.event += (v); \
	} while (0)

#define accel_update_task_stats(ch, task, event, v) \
	accel_update_stats(ch, operations[(task)->op_code].event, v)
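/* For reference, the task-stats macro indexes per-opcode counters, e.g.
 * accel_update_task_stats(ch, task, executed, 1) expands to:
 *
 *	(ch)->stats.operations[(task)->op_code].executed += (1);
 */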

static inline void
accel_sequence_set_state(struct spdk_accel_sequence *seq, enum accel_sequence_state state)
{
	SPDK_DEBUGLOG(accel, "seq=%p, setting state: %s -> %s\n", seq,
		      ACCEL_SEQUENCE_STATE_STRING(seq->state), ACCEL_SEQUENCE_STATE_STRING(state));
	assert(seq->state != ACCEL_SEQUENCE_STATE_ERROR || state == ACCEL_SEQUENCE_STATE_ERROR);
	seq->state = state;
}

static void
accel_sequence_set_fail(struct spdk_accel_sequence *seq, int status)
{
	accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
	assert(status != 0);
	seq->status = status;
}

int
spdk_accel_get_opc_module_name(enum spdk_accel_opcode opcode, const char **module_name)
{
	if (opcode >= SPDK_ACCEL_OPC_LAST) {
		/* invalid opcode */
		return -EINVAL;
	}

	if (g_modules_opc[opcode].module) {
		*module_name = g_modules_opc[opcode].module->name;
	} else {
		return -ENOENT;
	}

	return 0;
}

void
_accel_for_each_module(struct module_info *info, _accel_for_each_module_fn fn)
{
	struct spdk_accel_module_if *accel_module;
	enum spdk_accel_opcode opcode;
	int j = 0;

	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		for (opcode = 0; opcode < SPDK_ACCEL_OPC_LAST; opcode++) {
			if (accel_module->supports_opcode(opcode)) {
				info->ops[j] = opcode;
				j++;
			}
		}
		info->name = accel_module->name;
		info->num_ops = j;
		fn(info);
		j = 0;
	}
}

const char *
spdk_accel_get_opcode_name(enum spdk_accel_opcode opcode)
{
	if (opcode < SPDK_ACCEL_OPC_LAST) {
		return g_opcode_strings[opcode];
	}

	return NULL;
}

int
spdk_accel_assign_opc(enum spdk_accel_opcode opcode, const char *name)
{
	char *copy;

	if (g_modules_started == true) {
		/* we don't allow re-assignment once things have started */
		return -EINVAL;
	}

	if (opcode >= SPDK_ACCEL_OPC_LAST) {
		/* invalid opcode */
		return -EINVAL;
	}

	copy = strdup(name);
	if (copy == NULL) {
		return -ENOMEM;
	}

	/* module selection will be validated after the framework starts. */
	free(g_modules_opc_override[opcode]);
	g_modules_opc_override[opcode] = copy;

	return 0;
}

void
spdk_accel_task_complete(struct spdk_accel_task *accel_task, int status)
{
	struct accel_io_channel *accel_ch = accel_task->accel_ch;
	spdk_accel_completion_cb cb_fn = accel_task->cb_fn;
	void *cb_arg = accel_task->cb_arg;

	/* Return the task to the pool first, so that the pool isn't exhausted if the
	 * user's callback (cb_fn) recursively allocates another accel_task.
	 */
	TAILQ_INSERT_HEAD(&accel_ch->task_pool, accel_task, link);
	accel_task->seq = NULL;

	accel_update_task_stats(accel_ch, accel_task, executed, 1);
	accel_update_task_stats(accel_ch, accel_task, num_bytes, accel_task->nbytes);
	if (spdk_unlikely(status != 0)) {
		accel_update_task_stats(accel_ch, accel_task, failed, 1);
	}

	cb_fn(cb_arg, status);
}

inline static struct spdk_accel_task *
_get_task(struct accel_io_channel *accel_ch, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct spdk_accel_task *accel_task;

	accel_task = TAILQ_FIRST(&accel_ch->task_pool);
	if (spdk_unlikely(accel_task == NULL)) {
		accel_update_stats(accel_ch, retry.task, 1);
		return NULL;
	}

	TAILQ_REMOVE(&accel_ch->task_pool, accel_task, link);
	accel_task->link.tqe_next = NULL;
	accel_task->link.tqe_prev = NULL;

	accel_task->cb_fn = cb_fn;
	accel_task->cb_arg = cb_arg;
	accel_task->accel_ch = accel_ch;
	accel_task->s.iovs = NULL;
	accel_task->d.iovs = NULL;

	return accel_task;
}

static inline int
accel_submit_task(struct accel_io_channel *accel_ch, struct spdk_accel_task *task)
{
	struct spdk_io_channel *module_ch = accel_ch->module_ch[task->op_code];
	struct spdk_accel_module_if *module = g_modules_opc[task->op_code].module;
	int rc;

	rc = module->submit_tasks(module_ch, task);
	if (spdk_unlikely(rc != 0)) {
		accel_update_task_stats(accel_ch, task, failed, 1);
	}

	return rc;
}
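/* All submission paths below return -ENOMEM when the per-channel task pool is
 * empty. A minimal caller-side pattern (illustrative sketch only; my_retry_cb
 * and `io` are hypothetical) is to requeue and resubmit later:
 *
 *	rc = spdk_accel_submit_copy(ch, dst, src, nbytes, 0, my_retry_cb, io);
 *	if (rc == -ENOMEM) {
 *		// queue `io` and resubmit after an outstanding task completes
 *	}
 */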

static inline uint64_t
accel_get_iovlen(struct iovec *iovs, uint32_t iovcnt)
{
	uint64_t result = 0;
	uint32_t i;

	for (i = 0; i < iovcnt; ++i) {
		result += iovs[i].iov_len;
	}

	return result;
}

/* Accel framework public API for copy function */
int
spdk_accel_submit_copy(struct spdk_io_channel *ch, void *dst, void *src,
		       uint64_t nbytes, int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->op_code = SPDK_ACCEL_OPC_COPY;
	accel_task->flags = flags;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}
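/* Example usage (illustrative sketch; copy_done and my_ctx are hypothetical):
 *
 *	static void
 *	copy_done(void *cb_arg, int status)
 *	{
 *		// status == 0 on success, negative errno otherwise
 *	}
 *
 *	rc = spdk_accel_submit_copy(ch, dst, src, nbytes, 0, copy_done, my_ctx);
 *
 * `ch` must be an spdk_io_channel obtained via spdk_accel_get_io_channel().
 */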

/* Accel framework public API for dual cast copy function */
int
spdk_accel_submit_dualcast(struct spdk_io_channel *ch, void *dst1,
			   void *dst2, void *src, uint64_t nbytes, int flags,
			   spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	if ((uintptr_t)dst1 & (ALIGN_4K - 1) || (uintptr_t)dst2 & (ALIGN_4K - 1)) {
		SPDK_ERRLOG("Dualcast requires 4K alignment on dst addresses\n");
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d2.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST2];
	accel_task->d.iovs[0].iov_base = dst1;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->d2.iovs[0].iov_base = dst2;
	accel_task->d2.iovs[0].iov_len = nbytes;
	accel_task->d2.iovcnt = 1;
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->flags = flags;
	accel_task->op_code = SPDK_ACCEL_OPC_DUALCAST;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for compare function */
int
spdk_accel_submit_compare(struct spdk_io_channel *ch, void *src1,
			  void *src2, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
			  void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->s2.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC2];
	accel_task->s.iovs[0].iov_base = src1;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->s2.iovs[0].iov_base = src2;
	accel_task->s2.iovs[0].iov_len = nbytes;
	accel_task->s2.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->op_code = SPDK_ACCEL_OPC_COMPARE;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for fill function */
int
spdk_accel_submit_fill(struct spdk_io_channel *ch, void *dst,
		       uint8_t fill, uint64_t nbytes, int flags,
		       spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->nbytes = nbytes;
	memset(&accel_task->fill_pattern, fill, sizeof(uint64_t));
	accel_task->flags = flags;
	accel_task->op_code = SPDK_ACCEL_OPC_FILL;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for CRC-32C function */
int
spdk_accel_submit_crc32c(struct spdk_io_channel *ch, uint32_t *crc_dst,
			 void *src, uint32_t seed, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
			 void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->op_code = SPDK_ACCEL_OPC_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}
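/* Note: the vectored (*v) variants below accept scattered buffers and derive the
 * total length from the iovec array via accel_get_iovlen() instead of taking an
 * explicit nbytes parameter.
 */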

/* Accel framework public API for chained CRC-32C function */
int
spdk_accel_submit_crc32cv(struct spdk_io_channel *ch, uint32_t *crc_dst,
			  struct iovec *iov, uint32_t iov_cnt, uint32_t seed,
			  spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	if (iov == NULL) {
		SPDK_ERRLOG("iov should not be NULL\n");
		return -EINVAL;
	}

	if (!iov_cnt) {
		SPDK_ERRLOG("iovcnt should not be zero\n");
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		SPDK_ERRLOG("no memory\n");
		assert(0);
		return -ENOMEM;
	}

	accel_task->s.iovs = iov;
	accel_task->s.iovcnt = iov_cnt;
	accel_task->nbytes = accel_get_iovlen(iov, iov_cnt);
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->op_code = SPDK_ACCEL_OPC_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for copy with CRC-32C function */
int
spdk_accel_submit_copy_crc32c(struct spdk_io_channel *ch, void *dst,
			      void *src, uint32_t *crc_dst, uint32_t seed, uint64_t nbytes,
			      int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->flags = flags;
	accel_task->op_code = SPDK_ACCEL_OPC_COPY_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for chained copy + CRC-32C function */
int
spdk_accel_submit_copy_crc32cv(struct spdk_io_channel *ch, void *dst,
			       struct iovec *src_iovs, uint32_t iov_cnt, uint32_t *crc_dst,
			       uint32_t seed, int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	uint64_t nbytes;

	if (src_iovs == NULL) {
		SPDK_ERRLOG("iov should not be NULL\n");
		return -EINVAL;
	}

	if (!iov_cnt) {
		SPDK_ERRLOG("iovcnt should not be zero\n");
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		SPDK_ERRLOG("no memory\n");
		assert(0);
		return -ENOMEM;
	}

	nbytes = accel_get_iovlen(src_iovs, iov_cnt);
	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = iov_cnt;
	accel_task->nbytes = nbytes;
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->flags = flags;
	accel_task->op_code = SPDK_ACCEL_OPC_COPY_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}
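/* Note on the compress API below: `nbytes` is the capacity of the destination
 * buffer, and the number of bytes actually produced is reported back through
 * `output_size` by the module on completion.
 */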

int
spdk_accel_submit_compress(struct spdk_io_channel *ch, void *dst, uint64_t nbytes,
			   struct iovec *src_iovs, size_t src_iovcnt, uint32_t *output_size, int flags,
			   spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->output_size = output_size;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->nbytes = nbytes;
	accel_task->flags = flags;
	accel_task->op_code = SPDK_ACCEL_OPC_COMPRESS;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_decompress(struct spdk_io_channel *ch, struct iovec *dst_iovs,
			     size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
			     uint32_t *output_size, int flags, spdk_accel_completion_cb cb_fn,
			     void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->output_size = output_size;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	accel_task->flags = flags;
	accel_task->op_code = SPDK_ACCEL_OPC_DECOMPRESS;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_encrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  uint64_t iv, uint32_t block_size, int flags,
			  spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->crypto_key = key;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	accel_task->iv = iv;
	accel_task->block_size = block_size;
	accel_task->flags = flags;
	accel_task->op_code = SPDK_ACCEL_OPC_ENCRYPT;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}
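/* Note on the crypto submissions (encrypt above, decrypt below): `iv` is the
 * initial tweak/IV value for the first block and `block_size` is the data block
 * granularity; with the LBA-based tweak modes, callers typically pass the
 * starting logical block address as `iv`.
 */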

int
spdk_accel_submit_decrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  uint64_t iv, uint32_t block_size, int flags,
			  spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->crypto_key = key;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	accel_task->iv = iv;
	accel_task->block_size = block_size;
	accel_task->flags = flags;
	accel_task->op_code = SPDK_ACCEL_OPC_DECRYPT;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_xor(struct spdk_io_channel *ch, void *dst, void **sources, uint32_t nsrcs,
		      uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->nsrcs.srcs = sources;
	accel_task->nsrcs.cnt = nsrcs;
	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->op_code = SPDK_ACCEL_OPC_XOR;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

static inline struct accel_buffer *
accel_get_buf(struct accel_io_channel *ch, uint64_t len)
{
	struct accel_buffer *buf;

	buf = SLIST_FIRST(&ch->buf_pool);
	if (spdk_unlikely(buf == NULL)) {
		accel_update_stats(ch, retry.bufdesc, 1);
		return NULL;
	}

	SLIST_REMOVE_HEAD(&ch->buf_pool, link);
	buf->len = len;
	buf->buf = NULL;
	buf->seq = NULL;
	buf->cb_fn = NULL;

	return buf;
}

static inline void
accel_put_buf(struct accel_io_channel *ch, struct accel_buffer *buf)
{
	if (buf->buf != NULL) {
		spdk_iobuf_put(&ch->iobuf, buf->buf, buf->len);
	}

	SLIST_INSERT_HEAD(&ch->buf_pool, buf, link);
}

static inline struct spdk_accel_sequence *
accel_sequence_get(struct accel_io_channel *ch)
{
	struct spdk_accel_sequence *seq;

	seq = SLIST_FIRST(&ch->seq_pool);
	if (spdk_unlikely(seq == NULL)) {
		accel_update_stats(ch, retry.sequence, 1);
		return NULL;
	}

	SLIST_REMOVE_HEAD(&ch->seq_pool, link);

	TAILQ_INIT(&seq->tasks);
	SLIST_INIT(&seq->bounce_bufs);

	seq->ch = ch;
	seq->status = 0;
	seq->state = ACCEL_SEQUENCE_STATE_INIT;
	seq->in_process_sequence = false;

	return seq;
}

static inline void
accel_sequence_put(struct spdk_accel_sequence *seq)
{
	struct accel_io_channel *ch = seq->ch;
	struct accel_buffer *buf;

	while (!SLIST_EMPTY(&seq->bounce_bufs)) {
		buf = SLIST_FIRST(&seq->bounce_bufs);
		SLIST_REMOVE_HEAD(&seq->bounce_bufs, link);
		accel_put_buf(seq->ch, buf);
	}

	assert(TAILQ_EMPTY(&seq->tasks));
	seq->ch = NULL;

	SLIST_INSERT_HEAD(&ch->seq_pool, seq, link);
}

static void accel_sequence_task_cb(void *cb_arg, int status);

static inline struct spdk_accel_task *
accel_sequence_get_task(struct accel_io_channel *ch, struct spdk_accel_sequence *seq,
			spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct spdk_accel_task *task;

	task = _get_task(ch, accel_sequence_task_cb, seq);
	if (spdk_unlikely(task == NULL)) {
		return task;
	}

	task->step_cb_fn = cb_fn;
	task->step_cb_arg = cb_arg;
	task->seq = seq;

	return task;
}
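/* The append/finish API below builds chains of operations that execute back to
 * back. A minimal sketch (fill_done and seq_done are hypothetical callbacks):
 *
 *	struct spdk_accel_sequence *seq = NULL;
 *
 *	rc = spdk_accel_append_fill(&seq, ch, buf, len, NULL, NULL, 0xa5, 0,
 *				    fill_done, NULL);
 *	...
 *	spdk_accel_sequence_finish(seq, seq_done, NULL);
 */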
int
spdk_accel_append_copy(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
		       struct iovec *dst_iovs, uint32_t dst_iovcnt,
		       struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
		       struct iovec *src_iovs, uint32_t src_iovcnt,
		       struct spdk_memory_domain *src_domain, void *src_domain_ctx,
		       int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	task->flags = flags;
	task->op_code = SPDK_ACCEL_OPC_COPY;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_append_fill(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
		       void *buf, uint64_t len,
		       struct spdk_memory_domain *domain, void *domain_ctx, uint8_t pattern,
		       int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	memset(&task->fill_pattern, pattern, sizeof(uint64_t));

	task->d.iovs = &task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	task->d.iovs[0].iov_base = buf;
	task->d.iovs[0].iov_len = len;
	task->d.iovcnt = 1;
	task->nbytes = len;
	task->src_domain = NULL;
	task->dst_domain = domain;
	task->dst_domain_ctx = domain_ctx;
	task->flags = flags;
	task->op_code = SPDK_ACCEL_OPC_FILL;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_append_decompress(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
			     struct iovec *dst_iovs, size_t dst_iovcnt,
			     struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			     struct iovec *src_iovs, size_t src_iovcnt,
			     struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			     int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	/* TODO: support output_size for chaining */
	task->output_size = NULL;
	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	task->flags = flags;
	task->op_code = SPDK_ACCEL_OPC_DECOMPRESS;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_append_encrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
			  struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			  uint64_t iv, uint32_t block_size, int flags,
			  spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	assert(dst_iovs && dst_iovcnt && src_iovs && src_iovcnt && key && block_size);

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	task->crypto_key = key;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	task->iv = iv;
	task->block_size = block_size;
	task->flags = flags;
	task->op_code = SPDK_ACCEL_OPC_ENCRYPT;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_append_decrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
			  struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			  uint64_t iv, uint32_t block_size, int flags,
			  spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	assert(dst_iovs && dst_iovcnt && src_iovs && src_iovcnt && key && block_size);

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	task->crypto_key = key;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	task->iv = iv;
	task->block_size = block_size;
	task->flags = flags;
	task->op_code = SPDK_ACCEL_OPC_DECRYPT;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_append_crc32c(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
			 uint32_t *dst, struct iovec *iovs, uint32_t iovcnt,
			 struct spdk_memory_domain *domain, void *domain_ctx,
			 uint32_t seed, spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	task->s.iovs = iovs;
	task->s.iovcnt = iovcnt;
	task->src_domain = domain;
	task->src_domain_ctx = domain_ctx;
	task->nbytes = accel_get_iovlen(iovs, iovcnt);
	task->crc_dst = dst;
	task->seed = seed;
	task->op_code = SPDK_ACCEL_OPC_CRC32C;
	task->dst_domain = NULL;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_get_buf(struct spdk_io_channel *ch, uint64_t len, void **buf,
		   struct spdk_memory_domain **domain, void **domain_ctx)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct accel_buffer *accel_buf;

	accel_buf = accel_get_buf(accel_ch, len);
	if (spdk_unlikely(accel_buf == NULL)) {
		return -ENOMEM;
	}

	/* We always return the same pointer and identify the buffers through domain_ctx */
	*buf = ACCEL_BUFFER_BASE;
	*domain_ctx = accel_buf;
	*domain = g_accel_domain;

	return 0;
}

void
spdk_accel_put_buf(struct spdk_io_channel *ch, void *buf,
		   struct spdk_memory_domain *domain, void *domain_ctx)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct accel_buffer *accel_buf = domain_ctx;

	assert(domain == g_accel_domain);
	assert(buf == ACCEL_BUFFER_BASE);

	accel_put_buf(accel_ch, accel_buf);
}
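/* Buffers returned by spdk_accel_get_buf() above are virtual: the pointer is
 * always ACCEL_BUFFER_BASE and the real allocation is deferred until a sequence
 * actually needs the data. Such buffers are only meaningful within accel
 * sequences, paired with g_accel_domain as the memory domain.
 */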

static void
accel_sequence_complete_task(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	struct accel_io_channel *ch = seq->ch;
	spdk_accel_step_cb cb_fn;
	void *cb_arg;

	TAILQ_REMOVE(&seq->tasks, task, seq_link);
	cb_fn = task->step_cb_fn;
	cb_arg = task->step_cb_arg;
	TAILQ_INSERT_HEAD(&ch->task_pool, task, link);
	if (cb_fn != NULL) {
		cb_fn(cb_arg);
	}
}

static void
accel_sequence_complete_tasks(struct spdk_accel_sequence *seq)
{
	struct spdk_accel_task *task;

	while (!TAILQ_EMPTY(&seq->tasks)) {
		task = TAILQ_FIRST(&seq->tasks);
		accel_sequence_complete_task(seq, task);
	}
}

static void
accel_sequence_complete(struct spdk_accel_sequence *seq)
{
	SPDK_DEBUGLOG(accel, "Completed sequence: %p with status: %d\n", seq, seq->status);

	accel_update_stats(seq->ch, sequence_executed, 1);
	if (spdk_unlikely(seq->status != 0)) {
		accel_update_stats(seq->ch, sequence_failed, 1);
	}

	/* First, notify all users who appended operations to this sequence */
	accel_sequence_complete_tasks(seq);

	/* Then, notify the user who finished the sequence */
	seq->cb_fn(seq->cb_arg, seq->status);

	accel_sequence_put(seq);
}

static void
accel_update_virt_iov(struct iovec *diov, struct iovec *siov, struct accel_buffer *accel_buf)
{
	uintptr_t offset;

	offset = (uintptr_t)siov->iov_base & ACCEL_BUFFER_OFFSET_MASK;
	assert(offset < accel_buf->len);

	diov->iov_base = (char *)accel_buf->buf + offset;
	diov->iov_len = siov->iov_len;
}

static void
accel_sequence_set_virtbuf(struct spdk_accel_sequence *seq, struct accel_buffer *buf)
{
	struct spdk_accel_task *task;
	struct iovec *iov;

	/* Now that we've allocated the actual data buffer for this accel_buffer, update all tasks
	 * in a sequence that were using it.
	 */
	TAILQ_FOREACH(task, &seq->tasks, seq_link) {
		if (task->src_domain == g_accel_domain && task->src_domain_ctx == buf) {
			iov = &task->aux_iovs[SPDK_ACCEL_AXU_IOV_VIRT_SRC];
			assert(task->s.iovcnt == 1);
			accel_update_virt_iov(iov, &task->s.iovs[0], buf);
			task->src_domain = NULL;
			task->s.iovs = iov;
		}
		if (task->dst_domain == g_accel_domain && task->dst_domain_ctx == buf) {
			iov = &task->aux_iovs[SPDK_ACCEL_AXU_IOV_VIRT_DST];
			assert(task->d.iovcnt == 1);
			accel_update_virt_iov(iov, &task->d.iovs[0], buf);
			task->dst_domain = NULL;
			task->d.iovs = iov;
		}
	}
}

static void accel_process_sequence(struct spdk_accel_sequence *seq);

static void
accel_iobuf_get_virtbuf_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);

	assert(accel_buf->seq != NULL);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
	accel_process_sequence(accel_buf->seq);
}

static bool
accel_sequence_alloc_buf(struct spdk_accel_sequence *seq, struct accel_buffer *buf,
			 spdk_iobuf_get_cb cb_fn)
{
	struct accel_io_channel *ch = seq->ch;

	assert(buf->buf == NULL);
	assert(buf->seq == NULL);

	buf->seq = seq;
	buf->buf = spdk_iobuf_get(&ch->iobuf, buf->len, &buf->iobuf, cb_fn);
	if (buf->buf == NULL) {
		accel_update_stats(ch, retry.iobuf, 1);
		return false;
	}

	return true;
}

static bool
accel_sequence_check_virtbuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	/* If a task doesn't have dst/src (e.g. fill, crc32), its dst/src domain should be set to
	 * NULL */
	if (task->src_domain == g_accel_domain) {
		if (!accel_sequence_alloc_buf(seq, task->src_domain_ctx,
					      accel_iobuf_get_virtbuf_cb)) {
			return false;
		}

		accel_sequence_set_virtbuf(seq, task->src_domain_ctx);
	}

	if (task->dst_domain == g_accel_domain) {
		if (!accel_sequence_alloc_buf(seq, task->dst_domain_ctx,
					      accel_iobuf_get_virtbuf_cb)) {
			return false;
		}

		accel_sequence_set_virtbuf(seq, task->dst_domain_ctx);
	}

	return true;
}

static void
accel_sequence_get_buf_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);

	assert(accel_buf->seq != NULL);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
	accel_buf->cb_fn(accel_buf->seq, accel_buf->cb_ctx);
}

bool
spdk_accel_alloc_sequence_buf(struct spdk_accel_sequence *seq, void *buf,
			      struct spdk_memory_domain *domain, void *domain_ctx,
			      spdk_accel_sequence_get_buf_cb cb_fn, void *cb_ctx)
{
	struct accel_buffer *accel_buf = domain_ctx;

	assert(domain == g_accel_domain);
	accel_buf->cb_fn = cb_fn;
	accel_buf->cb_ctx = cb_ctx;

	if (!accel_sequence_alloc_buf(seq, accel_buf, accel_sequence_get_buf_cb)) {
		return false;
	}

	accel_sequence_set_virtbuf(seq, accel_buf);

	return true;
}

struct spdk_accel_task *
spdk_accel_sequence_first_task(struct spdk_accel_sequence *seq)
{
	return TAILQ_FIRST(&seq->tasks);
}

struct spdk_accel_task *
spdk_accel_sequence_next_task(struct spdk_accel_task *task)
{
	return TAILQ_NEXT(task, seq_link);
}

static inline void
accel_set_bounce_buffer(struct spdk_accel_bounce_buffer *bounce, struct iovec **iovs,
			uint32_t *iovcnt, struct spdk_memory_domain **domain, void **domain_ctx,
			struct accel_buffer *buf)
{
	bounce->orig_iovs = *iovs;
	bounce->orig_iovcnt = *iovcnt;
	bounce->orig_domain = *domain;
	bounce->orig_domain_ctx = *domain_ctx;
	bounce->iov.iov_base = buf->buf;
	bounce->iov.iov_len = buf->len;

	*iovs = &bounce->iov;
	*iovcnt = 1;
	*domain = NULL;
}

static void
accel_iobuf_get_src_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct spdk_accel_task *task;
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	task = TAILQ_FIRST(&accel_buf->seq->tasks);
	assert(task != NULL);

	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
	accel_set_bounce_buffer(&task->bounce.s, &task->s.iovs, &task->s.iovcnt, &task->src_domain,
				&task->src_domain_ctx, accel_buf);
	accel_process_sequence(accel_buf->seq);
}
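/* Bounce-buffer handling: accel_set_bounce_buffer() above saves a task's
 * original iovecs and memory domain in task->bounce and swaps in a single local
 * buffer. Before execution, the data is pulled from the foreign domain into
 * that buffer (accel_task_pull_data()), and results are pushed back out
 * afterwards (accel_task_push_data()).
 */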

static void
accel_iobuf_get_dst_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct spdk_accel_task *task;
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	task = TAILQ_FIRST(&accel_buf->seq->tasks);
	assert(task != NULL);

	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
	accel_set_bounce_buffer(&task->bounce.d, &task->d.iovs, &task->d.iovcnt, &task->dst_domain,
				&task->dst_domain_ctx, accel_buf);
	accel_process_sequence(accel_buf->seq);
}

static int
accel_sequence_check_bouncebuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	struct accel_buffer *buf;

	if (task->src_domain != NULL) {
		/* By the time we're here, accel buffers should have been allocated */
		assert(task->src_domain != g_accel_domain);

		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->s.iovs, task->s.iovcnt));
		if (buf == NULL) {
			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
			return -ENOMEM;
		}

		SLIST_INSERT_HEAD(&seq->bounce_bufs, buf, link);
		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_src_bounce_cb)) {
			return -EAGAIN;
		}

		accel_set_bounce_buffer(&task->bounce.s, &task->s.iovs, &task->s.iovcnt,
					&task->src_domain, &task->src_domain_ctx, buf);
	}

	if (task->dst_domain != NULL) {
		/* By the time we're here, accel buffers should have been allocated */
		assert(task->dst_domain != g_accel_domain);

		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->d.iovs, task->d.iovcnt));
		if (buf == NULL) {
			/* The src buffer will be released when a sequence is completed */
			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
			return -ENOMEM;
		}

		SLIST_INSERT_HEAD(&seq->bounce_bufs, buf, link);
		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_dst_bounce_cb)) {
			return -EAGAIN;
		}

		accel_set_bounce_buffer(&task->bounce.d, &task->d.iovs, &task->d.iovcnt,
					&task->dst_domain, &task->dst_domain_ctx, buf);
	}

	return 0;
}

static void
accel_task_pull_data_cb(void *ctx, int status)
{
	struct spdk_accel_sequence *seq = ctx;

	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
	if (spdk_likely(status == 0)) {
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
	} else {
		accel_sequence_set_fail(seq, status);
	}

	accel_process_sequence(seq);
}

static void
accel_task_pull_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	int rc;

	assert(task->bounce.s.orig_iovs != NULL);
	assert(task->bounce.s.orig_domain != NULL);
	assert(task->bounce.s.orig_domain != g_accel_domain);
	assert(!g_modules_opc[task->op_code].supports_memory_domains);

	rc = spdk_memory_domain_pull_data(task->bounce.s.orig_domain,
					  task->bounce.s.orig_domain_ctx,
					  task->bounce.s.orig_iovs, task->bounce.s.orig_iovcnt,
					  task->s.iovs, task->s.iovcnt,
					  accel_task_pull_data_cb, seq);
	if (spdk_unlikely(rc != 0)) {
		SPDK_ERRLOG("Failed to pull data from memory domain: %s, rc: %d\n",
			    spdk_memory_domain_get_dma_device_id(task->bounce.s.orig_domain), rc);
		accel_sequence_set_fail(seq, rc);
	}
}

static void
accel_task_push_data_cb(void *ctx, int status)
{
	struct spdk_accel_sequence *seq = ctx;

	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
	if (spdk_likely(status == 0)) {
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
	} else {
		accel_sequence_set_fail(seq, status);
	}

	accel_process_sequence(seq);
}

static void
accel_task_push_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	int rc;

	assert(task->bounce.d.orig_iovs != NULL);
	assert(task->bounce.d.orig_domain != NULL);
	assert(task->bounce.d.orig_domain != g_accel_domain);
	assert(!g_modules_opc[task->op_code].supports_memory_domains);

	rc = spdk_memory_domain_push_data(task->bounce.d.orig_domain,
					  task->bounce.d.orig_domain_ctx,
					  task->bounce.d.orig_iovs, task->bounce.d.orig_iovcnt,
					  task->d.iovs, task->d.iovcnt,
					  accel_task_push_data_cb, seq);
	if (spdk_unlikely(rc != 0)) {
		SPDK_ERRLOG("Failed to push data to memory domain: %s, rc: %d\n",
			    spdk_memory_domain_get_dma_device_id(task->bounce.d.orig_domain), rc);
		accel_sequence_set_fail(seq, rc);
	}
}

static void
accel_process_sequence(struct spdk_accel_sequence *seq)
{
	struct accel_io_channel *accel_ch = seq->ch;
	struct spdk_accel_task *task;
	enum accel_sequence_state state;
	int rc;

	/* Prevent recursive calls to this function */
	if (spdk_unlikely(seq->in_process_sequence)) {
		return;
	}
	seq->in_process_sequence = true;

	task = TAILQ_FIRST(&seq->tasks);
	do {
		state = seq->state;
		switch (state) {
		case ACCEL_SEQUENCE_STATE_INIT:
			if (g_accel_driver != NULL) {
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS);
				break;
			}
		/* Fall through */
		case ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
			if (!accel_sequence_check_virtbuf(seq, task)) {
				/* We couldn't allocate a buffer, wait until one is available */
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
		/* Fall through */
		case ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF:
			/* If a module supports memory domains, we don't need to allocate bounce
			 * buffers */
			if (g_modules_opc[task->op_code].supports_memory_domains) {
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
			rc = accel_sequence_check_bouncebuf(seq, task);
			if (spdk_unlikely(rc != 0)) {
				/* We couldn't allocate a buffer, wait until one is available */
				if (rc == -EAGAIN) {
					break;
				}
				accel_sequence_set_fail(seq, rc);
				break;
			}
			if (task->s.iovs == &task->bounce.s.iov) {
				assert(task->bounce.s.orig_iovs);
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PULL_DATA);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
		/* Fall through */
		case ACCEL_SEQUENCE_STATE_EXEC_TASK:
			SPDK_DEBUGLOG(accel, "Executing %s operation, sequence: %p\n",
				      g_opcode_strings[task->op_code], seq);

			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_TASK);
			rc = accel_submit_task(accel_ch, task);
			if (spdk_unlikely(rc != 0)) {
				SPDK_ERRLOG("Failed to submit %s operation, sequence: %p\n",
					    g_opcode_strings[task->op_code], seq);
				accel_sequence_set_fail(seq, rc);
			}
			break;
		case ACCEL_SEQUENCE_STATE_PULL_DATA:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
			accel_task_pull_data(seq, task);
			break;
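		/* The COMPLETE_TASK state below is entered via accel_sequence_task_cb()
		 * once the module finishes the task submitted in the EXEC_TASK state. */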
		case ACCEL_SEQUENCE_STATE_COMPLETE_TASK:
			if (task->d.iovs == &task->bounce.d.iov) {
				assert(task->bounce.d.orig_iovs);
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PUSH_DATA);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
			break;
		case ACCEL_SEQUENCE_STATE_PUSH_DATA:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
			accel_task_push_data(seq, task);
			break;
		case ACCEL_SEQUENCE_STATE_NEXT_TASK:
			accel_sequence_complete_task(seq, task);
			/* Check if there are any remaining tasks */
			task = TAILQ_FIRST(&seq->tasks);
			if (task == NULL) {
				/* Immediately return here to make sure we don't touch the sequence
				 * after it's completed */
				accel_sequence_complete(seq);
				return;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_INIT);
			break;
		case ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS:
			assert(!TAILQ_EMPTY(&seq->tasks));

			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS);
			rc = g_accel_driver->execute_sequence(accel_ch->driver_channel, seq);
			if (spdk_unlikely(rc != 0)) {
				SPDK_ERRLOG("Failed to execute sequence: %p using driver: %s\n",
					    seq, g_accel_driver->name);
				accel_sequence_set_fail(seq, rc);
			}
			break;
		case ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS:
			/* Get the task again, as the driver might have completed some tasks
			 * synchronously */
			task = TAILQ_FIRST(&seq->tasks);
			if (task == NULL) {
				/* Immediately return here to make sure we don't touch the sequence
				 * after it's completed */
				accel_sequence_complete(seq);
				return;
			}
			/* We don't want to execute the next task through the driver, so we
			 * explicitly omit the INIT state here */
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
			break;
		case ACCEL_SEQUENCE_STATE_ERROR:
			/* Immediately return here to make sure we don't touch the sequence
			 * after it's completed */
			assert(seq->status != 0);
			accel_sequence_complete(seq);
			return;
		case ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF:
		case ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF:
		case ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA:
		case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
		case ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA:
		case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS:
			break;
		default:
			assert(0 && "bad state");
			break;
		}
	} while (seq->state != state);

	seq->in_process_sequence = false;
}

static void
accel_sequence_task_cb(void *cb_arg, int status)
{
	struct spdk_accel_sequence *seq = cb_arg;
	struct spdk_accel_task *task = TAILQ_FIRST(&seq->tasks);
	struct accel_io_channel *accel_ch = seq->ch;

	/* spdk_accel_task_complete() puts the task back to the task pool, but we don't want to do
	 * that if a task is part of a sequence. Removing the task from that pool here is the
	 * easiest way to prevent this, even though it is a bit hacky.
	 */
	assert(task != NULL);
	TAILQ_REMOVE(&accel_ch->task_pool, task, link);

	switch (seq->state) {
	case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_COMPLETE_TASK);
		if (spdk_unlikely(status != 0)) {
			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p\n",
				    g_opcode_strings[task->op_code], seq);
			accel_sequence_set_fail(seq, status);
		}

		accel_process_sequence(seq);
		break;
	case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS:
		assert(g_accel_driver != NULL);
		/* Immediately remove the task from the outstanding list to make sure the next call
		 * to spdk_accel_sequence_first_task() doesn't return it */
		accel_sequence_complete_task(seq, task);
		if (spdk_unlikely(status != 0)) {
			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p through "
				    "driver: %s\n", g_opcode_strings[task->op_code], seq,
				    g_accel_driver->name);
			/* Update status without using accel_sequence_set_fail() to avoid changing
			 * seq's state to ERROR until driver calls spdk_accel_sequence_continue() */
			seq->status = status;
		}
		break;
	default:
		assert(0 && "bad state");
		break;
	}
}

void
spdk_accel_sequence_continue(struct spdk_accel_sequence *seq)
{
	assert(g_accel_driver != NULL);
	assert(seq->state == ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS);

	if (spdk_likely(seq->status == 0)) {
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS);
	} else {
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
	}

	accel_process_sequence(seq);
}

static bool
accel_compare_iovs(struct iovec *iova, uint32_t iovacnt, struct iovec *iovb, uint32_t iovbcnt)
{
	/* For now, just do a dumb check that the iovecs arrays are exactly the same */
	if (iovacnt != iovbcnt) {
		return false;
	}

	return memcmp(iova, iovb, sizeof(*iova) * iovacnt) == 0;
}

static bool
accel_task_set_dstbuf(struct spdk_accel_task *task, struct spdk_accel_task *next)
{
	struct spdk_accel_task *prev;

	switch (task->op_code) {
	case SPDK_ACCEL_OPC_DECOMPRESS:
	case SPDK_ACCEL_OPC_FILL:
	case SPDK_ACCEL_OPC_ENCRYPT:
	case SPDK_ACCEL_OPC_DECRYPT:
		if (task->dst_domain != next->src_domain) {
			return false;
		}
		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
					next->s.iovs, next->s.iovcnt)) {
			return false;
		}
		task->d.iovs = next->d.iovs;
		task->d.iovcnt = next->d.iovcnt;
		task->dst_domain = next->dst_domain;
		task->dst_domain_ctx = next->dst_domain_ctx;
		break;
	case SPDK_ACCEL_OPC_CRC32C:
		/* crc32 is special, because it doesn't have a dst buffer */
		if (task->src_domain != next->src_domain) {
			return false;
		}
		if (!accel_compare_iovs(task->s.iovs, task->s.iovcnt,
					next->s.iovs, next->s.iovcnt)) {
			return false;
		}
		/* We can only change crc32's buffer if we can change previous task's buffer */
		prev = TAILQ_PREV(task, accel_sequence_tasks, seq_link);
		if (prev == NULL) {
			return false;
		}
		if (!accel_task_set_dstbuf(prev, next)) {
			return false;
		}
		task->s.iovs = next->d.iovs;
		task->s.iovcnt = next->d.iovcnt;
		task->src_domain = next->dst_domain;
		task->src_domain_ctx = next->dst_domain_ctx;
		break;
	default:
		return false;
	}

	return true;
}
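/* Example of the copy elision performed by accel_task_set_dstbuf() above and
 * accel_sequence_merge_tasks() below (illustrative): a decompress writing
 * A -> B followed by a copy B -> C is rewritten as decompress A -> C and the
 * copy is dropped, provided B's iovecs and memory domain match exactly.
 */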
static void
accel_sequence_merge_tasks(struct spdk_accel_sequence *seq, struct spdk_accel_task *task,
			   struct spdk_accel_task **next_task)
{
	struct spdk_accel_task *next = *next_task;

	switch (task->op_code) {
	case SPDK_ACCEL_OPC_COPY:
		/* We only allow changing src of operations that actually have a src, e.g. we never
		 * do it for fill.  Theoretically, it is possible, but we'd have to be careful to
		 * change the src of the operation after fill (which in turn could also be a fill).
		 * So, for the sake of simplicity, skip this type of operation for now.
		 */
		if (next->op_code != SPDK_ACCEL_OPC_DECOMPRESS &&
		    next->op_code != SPDK_ACCEL_OPC_COPY &&
		    next->op_code != SPDK_ACCEL_OPC_ENCRYPT &&
		    next->op_code != SPDK_ACCEL_OPC_DECRYPT &&
		    next->op_code != SPDK_ACCEL_OPC_CRC32C) {
			break;
		}
		if (task->dst_domain != next->src_domain) {
			break;
		}
		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
					next->s.iovs, next->s.iovcnt)) {
			break;
		}
		next->s.iovs = task->s.iovs;
		next->s.iovcnt = task->s.iovcnt;
		next->src_domain = task->src_domain;
		next->src_domain_ctx = task->src_domain_ctx;
		accel_sequence_complete_task(seq, task);
		break;
	case SPDK_ACCEL_OPC_DECOMPRESS:
	case SPDK_ACCEL_OPC_FILL:
	case SPDK_ACCEL_OPC_ENCRYPT:
	case SPDK_ACCEL_OPC_DECRYPT:
	case SPDK_ACCEL_OPC_CRC32C:
		/* We can only merge tasks when one of them is a copy */
		if (next->op_code != SPDK_ACCEL_OPC_COPY) {
			break;
		}
		if (!accel_task_set_dstbuf(task, next)) {
			break;
		}
		/* We're removing next_task from the tasks queue, so we need to update its pointer,
		 * so that the TAILQ_FOREACH_SAFE() loop in spdk_accel_sequence_finish() works
		 * correctly */
		*next_task = TAILQ_NEXT(next, seq_link);
		accel_sequence_complete_task(seq, next);
		break;
	default:
		assert(0 && "bad opcode");
		break;
	}
}

void
spdk_accel_sequence_finish(struct spdk_accel_sequence *seq,
			   spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct spdk_accel_task *task, *next;

	/* Try to remove any copy operations if possible */
	TAILQ_FOREACH_SAFE(task, &seq->tasks, seq_link, next) {
		if (next == NULL) {
			break;
		}
		accel_sequence_merge_tasks(seq, task, &next);
	}

	seq->cb_fn = cb_fn;
	seq->cb_arg = cb_arg;

	accel_process_sequence(seq);
}

void
spdk_accel_sequence_reverse(struct spdk_accel_sequence *seq)
{
	struct accel_sequence_tasks tasks = TAILQ_HEAD_INITIALIZER(tasks);
	struct spdk_accel_task *task;

	TAILQ_SWAP(&tasks, &seq->tasks, spdk_accel_task, seq_link);

	while (!TAILQ_EMPTY(&tasks)) {
		task = TAILQ_FIRST(&tasks);
		TAILQ_REMOVE(&tasks, task, seq_link);
		TAILQ_INSERT_HEAD(&seq->tasks, task, seq_link);
	}
}

void
spdk_accel_sequence_abort(struct spdk_accel_sequence *seq)
{
	if (seq == NULL) {
		return;
	}

	accel_sequence_complete_tasks(seq);
	accel_sequence_put(seq);
}

struct spdk_memory_domain *
spdk_accel_get_memory_domain(void)
{
	return g_accel_domain;
}
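/*
 * Example (illustrative sketch): a typical caller builds a sequence by appending
 * operations and then finishes it with a completion callback.  The buffers and
 * the exact spdk_accel_append_copy() argument list below are hypothetical;
 * consult include/spdk/accel.h for the signatures in this SPDK revision:
 *
 *	struct spdk_accel_sequence *seq = NULL;
 *
 *	rc = spdk_accel_append_copy(&seq, ch, dst_iovs, dst_iovcnt, NULL, NULL,
 *				    src_iovs, src_iovcnt, NULL, NULL, ...);
 *	if (rc != 0) {
 *		spdk_accel_sequence_abort(seq);
 *		return rc;
 *	}
 *	... append further operations (decompress, crc32c, ...) ...
 *	spdk_accel_sequence_finish(seq, my_done_cb, my_ctx);
 *
 * spdk_accel_sequence_finish() first runs the copy-elision pass above and only
 * then starts executing the (possibly shortened) task list.
 */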
static struct spdk_accel_module_if *
_module_find_by_name(const char *name)
{
	struct spdk_accel_module_if *accel_module = NULL;

	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		if (strcmp(name, accel_module->name) == 0) {
			break;
		}
	}

	return accel_module;
}

static inline struct spdk_accel_crypto_key *
_accel_crypto_key_get(const char *name)
{
	struct spdk_accel_crypto_key *key;

	assert(spdk_spin_held(&g_keyring_spin));

	TAILQ_FOREACH(key, &g_keyring, link) {
		if (strcmp(name, key->param.key_name) == 0) {
			return key;
		}
	}

	return NULL;
}

static void
accel_crypto_key_free_mem(struct spdk_accel_crypto_key *key)
{
	if (key->param.hex_key) {
		spdk_memset_s(key->param.hex_key, key->key_size * 2, 0, key->key_size * 2);
		free(key->param.hex_key);
	}
	if (key->param.hex_key2) {
		spdk_memset_s(key->param.hex_key2, key->key2_size * 2, 0, key->key2_size * 2);
		free(key->param.hex_key2);
	}
	free(key->param.tweak_mode);
	free(key->param.key_name);
	free(key->param.cipher);
	if (key->key) {
		spdk_memset_s(key->key, key->key_size, 0, key->key_size);
		free(key->key);
	}
	if (key->key2) {
		spdk_memset_s(key->key2, key->key2_size, 0, key->key2_size);
		free(key->key2);
	}
	free(key);
}

static void
accel_crypto_key_destroy_unsafe(struct spdk_accel_crypto_key *key)
{
	assert(key->module_if);
	assert(key->module_if->crypto_key_deinit);

	key->module_if->crypto_key_deinit(key);
	accel_crypto_key_free_mem(key);
}

/*
 * This function mitigates a timing side channel which could be caused by using strcmp()
 * Please refer to chapter "Mitigating Information Leakage Based on Variable Timing" in
 * the article [1] for more details
 * [1] https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/secure-coding/mitigate-timing-side-channel-crypto-implementation.html
 */
static bool
accel_aes_xts_keys_equal(const char *k1, size_t k1_len, const char *k2, size_t k2_len)
{
	size_t i;
	volatile size_t x = k1_len ^ k2_len;

	for (i = 0; ((i < k1_len) & (i < k2_len)); i++) {
		x |= k1[i] ^ k2[i];
	}

	return x == 0;
}
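/*
 * Worked example (not code from the tree): unlike strcmp(), the loop above uses
 * '&' and '|=' so there is no early exit.  For two 16-byte keys differing only
 * in byte 0, or only in byte 15, the loop still executes all 16 iterations and
 * merely accumulates the XOR difference into 'x'; execution time therefore does
 * not reveal the position of the first differing byte.
 */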
static const char *g_tweak_modes[] = {
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA] = "SIMPLE_LBA",
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_JOIN_NEG_LBA_WITH_LBA] = "JOIN_NEG_LBA_WITH_LBA",
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_INCR_512_FULL_LBA] = "INCR_512_FULL_LBA",
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_INCR_512_UPPER_LBA] = "INCR_512_UPPER_LBA",
};

static const char *g_ciphers[] = {
	[SPDK_ACCEL_CIPHER_AES_CBC] = "AES_CBC",
	[SPDK_ACCEL_CIPHER_AES_XTS] = "AES_XTS",
};

int
spdk_accel_crypto_key_create(const struct spdk_accel_crypto_key_create_param *param)
{
	struct spdk_accel_module_if *module;
	struct spdk_accel_crypto_key *key;
	size_t hex_key_size, hex_key2_size;
	bool found = false;
	size_t i;
	int rc;

	if (!param || !param->hex_key || !param->cipher || !param->key_name) {
		return -EINVAL;
	}

	if (g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module != g_modules_opc[SPDK_ACCEL_OPC_DECRYPT].module) {
		/* hardly ever possible, but let's check and warn the user */
		SPDK_ERRLOG("Different accel modules are used for encryption and decryption\n");
	}
	module = g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module;

	if (!module) {
		SPDK_ERRLOG("No accel module assigned for crypto operations\n");
		return -ENOENT;
	}

	if (!module->crypto_key_init || !module->crypto_supports_cipher) {
		SPDK_ERRLOG("Module %s doesn't support crypto operations\n", module->name);
		return -ENOTSUP;
	}

	key = calloc(1, sizeof(*key));
	if (!key) {
		return -ENOMEM;
	}

	key->param.key_name = strdup(param->key_name);
	if (!key->param.key_name) {
		rc = -ENOMEM;
		goto error;
	}

	for (i = 0; i < SPDK_COUNTOF(g_ciphers); ++i) {
		assert(g_ciphers[i]);

		if (strncmp(param->cipher, g_ciphers[i], strlen(g_ciphers[i])) == 0) {
			key->cipher = i;
			found = true;
			break;
		}
	}

	if (!found) {
		SPDK_ERRLOG("Failed to parse cipher\n");
		rc = -EINVAL;
		goto error;
	}

	key->param.cipher = strdup(param->cipher);
	if (!key->param.cipher) {
		rc = -ENOMEM;
		goto error;
	}

	hex_key_size = strnlen(param->hex_key, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
	if (hex_key_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
		SPDK_ERRLOG("key1 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
		rc = -EINVAL;
		goto error;
	}

	if (hex_key_size == 0) {
		SPDK_ERRLOG("key1 size cannot be 0\n");
		rc = -EINVAL;
		goto error;
	}

	key->param.hex_key = strdup(param->hex_key);
	if (!key->param.hex_key) {
		rc = -ENOMEM;
		goto error;
	}

	key->key_size = hex_key_size / 2;
	key->key = spdk_unhexlify(key->param.hex_key);
	if (!key->key) {
		SPDK_ERRLOG("Failed to unhexlify key1\n");
		rc = -EINVAL;
		goto error;
	}

	if (param->hex_key2) {
		hex_key2_size = strnlen(param->hex_key2, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
		if (hex_key2_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
			SPDK_ERRLOG("key2 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
			rc = -EINVAL;
			goto error;
		}

		if (hex_key2_size == 0) {
			SPDK_ERRLOG("key2 size cannot be 0\n");
			rc = -EINVAL;
			goto error;
		}

		key->param.hex_key2 = strdup(param->hex_key2);
		if (!key->param.hex_key2) {
			rc = -ENOMEM;
			goto error;
		}

		key->key2_size = hex_key2_size / 2;
		key->key2 = spdk_unhexlify(key->param.hex_key2);
		if (!key->key2) {
			SPDK_ERRLOG("Failed to unhexlify key2\n");
			rc = -EINVAL;
			goto error;
		}
	}

	key->tweak_mode = ACCEL_CRYPTO_TWEAK_MODE_DEFAULT;
	if (param->tweak_mode) {
		found = false;

		key->param.tweak_mode = strdup(param->tweak_mode);
		if (!key->param.tweak_mode) {
			rc = -ENOMEM;
			goto error;
		}

		for (i = 0; i < SPDK_COUNTOF(g_tweak_modes); ++i) {
			assert(g_tweak_modes[i]);

			if (strncmp(param->tweak_mode, g_tweak_modes[i], strlen(g_tweak_modes[i])) == 0) {
				key->tweak_mode = i;
				found = true;
				break;
			}
		}

		if (!found) {
			SPDK_ERRLOG("Failed to parse tweak mode\n");
			rc = -EINVAL;
			goto error;
		}
	}

	if ((!module->crypto_supports_tweak_mode && key->tweak_mode != ACCEL_CRYPTO_TWEAK_MODE_DEFAULT) ||
	    (module->crypto_supports_tweak_mode && !module->crypto_supports_tweak_mode(key->tweak_mode))) {
		SPDK_ERRLOG("Module %s doesn't support %s tweak mode\n", module->name,
			    g_tweak_modes[key->tweak_mode]);
		rc = -EINVAL;
		goto error;
	}

	if (!module->crypto_supports_cipher(key->cipher, key->key_size)) {
		SPDK_ERRLOG("Module %s doesn't support %s cipher with %zu key size\n", module->name,
			    g_ciphers[key->cipher], key->key_size);
		rc = -EINVAL;
		goto error;
	}

	if (key->cipher == SPDK_ACCEL_CIPHER_AES_XTS) {
		if (!key->key2) {
			SPDK_ERRLOG("%s key2 is missing\n", g_ciphers[key->cipher]);
			rc = -EINVAL;
			goto error;
		}

		if (key->key_size != key->key2_size) {
			SPDK_ERRLOG("%s key size %zu is not equal to key2 size %zu\n", g_ciphers[key->cipher],
				    key->key_size, key->key2_size);
			rc = -EINVAL;
			goto error;
		}

		if (accel_aes_xts_keys_equal(key->key, key->key_size, key->key2, key->key2_size)) {
			SPDK_ERRLOG("%s identical keys are not secure\n", g_ciphers[key->cipher]);
			rc = -EINVAL;
			goto error;
		}
	}

	if (key->cipher == SPDK_ACCEL_CIPHER_AES_CBC) {
		if (key->key2_size) {
			SPDK_ERRLOG("%s doesn't use key2\n", g_ciphers[key->cipher]);
			rc = -EINVAL;
			goto error;
		}
	}

	key->module_if = module;

	spdk_spin_lock(&g_keyring_spin);
	if (_accel_crypto_key_get(param->key_name)) {
		rc = -EEXIST;
	} else {
		rc = module->crypto_key_init(key);
		if (rc) {
			SPDK_ERRLOG("Module %s failed to initialize crypto key\n", module->name);
		} else {
			TAILQ_INSERT_TAIL(&g_keyring, key, link);
		}
	}
	spdk_spin_unlock(&g_keyring_spin);

	if (rc) {
		goto error;
	}

	return 0;

error:
	accel_crypto_key_free_mem(key);
	return rc;
}
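/*
 * Example (illustrative sketch): creating an AES_XTS key from hex strings.  The
 * 32-hex-digit (16-byte) keys and the key name below are dummies for
 * illustration only:
 *
 *	struct spdk_accel_crypto_key_create_param param = {
 *		.cipher = "AES_XTS",
 *		.hex_key = "00112233445566778899aabbccddeeff",
 *		.hex_key2 = "ffeeddccbbaa99887766554433221100",
 *		.tweak_mode = "SIMPLE_LBA",	// optional, this is the default
 *		.key_name = "my_xts_key",
 *	};
 *
 *	rc = spdk_accel_crypto_key_create(&param);
 *
 * The same parameters are typically supplied via the accel_crypto_key_create RPC.
 */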
int
spdk_accel_crypto_key_destroy(struct spdk_accel_crypto_key *key)
{
	if (!key || !key->module_if) {
		return -EINVAL;
	}

	spdk_spin_lock(&g_keyring_spin);
	if (!_accel_crypto_key_get(key->param.key_name)) {
		spdk_spin_unlock(&g_keyring_spin);
		return -ENOENT;
	}
	TAILQ_REMOVE(&g_keyring, key, link);
	spdk_spin_unlock(&g_keyring_spin);

	accel_crypto_key_destroy_unsafe(key);

	return 0;
}

struct spdk_accel_crypto_key *
spdk_accel_crypto_key_get(const char *name)
{
	struct spdk_accel_crypto_key *key;

	spdk_spin_lock(&g_keyring_spin);
	key = _accel_crypto_key_get(name);
	spdk_spin_unlock(&g_keyring_spin);

	return key;
}

/* Helper function called when accel modules register with the framework. */
void
spdk_accel_module_list_add(struct spdk_accel_module_if *accel_module)
{
	struct spdk_accel_module_if *tmp;

	if (_module_find_by_name(accel_module->name)) {
		SPDK_NOTICELOG("Module %s already registered\n", accel_module->name);
		assert(false);
		return;
	}

	TAILQ_FOREACH(tmp, &spdk_accel_module_list, tailq) {
		if (accel_module->priority < tmp->priority) {
			break;
		}
	}

	if (tmp != NULL) {
		TAILQ_INSERT_BEFORE(tmp, accel_module, tailq);
	} else {
		TAILQ_INSERT_TAIL(&spdk_accel_module_list, accel_module, tailq);
	}
}
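/*
 * Example (illustrative sketch): modules normally don't call
 * spdk_accel_module_list_add() directly; they define a struct
 * spdk_accel_module_if and register it with the SPDK_ACCEL_MODULE_REGISTER()
 * constructor macro from spdk/accel_module.h.  The module name and ops below
 * are hypothetical:
 *
 *	static struct spdk_accel_module_if g_my_module = {
 *		.module_init		= my_module_init,
 *		.module_fini		= my_module_fini,
 *		.name			= "my_accel",
 *		.supports_opcode	= my_supports_opcode,
 *		.get_ctx_size		= my_get_ctx_size,
 *		.get_io_channel		= my_get_io_channel,
 *		.submit_tasks		= my_submit_tasks,
 *	};
 *	SPDK_ACCEL_MODULE_REGISTER(my_accel, &g_my_module)
 */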
/* Framework level channel create callback. */
static int
accel_create_channel(void *io_device, void *ctx_buf)
{
	struct accel_io_channel *accel_ch = ctx_buf;
	struct spdk_accel_task *accel_task;
	struct spdk_accel_sequence *seq;
	struct accel_buffer *buf;
	uint8_t *task_mem;
	uint32_t i = 0, j;
	int rc;

	accel_ch->task_pool_base = calloc(g_opts.task_count, g_max_accel_module_size);
	if (accel_ch->task_pool_base == NULL) {
		return -ENOMEM;
	}

	accel_ch->seq_pool_base = aligned_alloc(SPDK_CACHE_LINE_SIZE,
						g_opts.sequence_count * sizeof(struct spdk_accel_sequence));
	if (accel_ch->seq_pool_base == NULL) {
		goto err;
	}
	memset(accel_ch->seq_pool_base, 0, g_opts.sequence_count * sizeof(struct spdk_accel_sequence));

	accel_ch->buf_pool_base = calloc(g_opts.buf_count, sizeof(struct accel_buffer));
	if (accel_ch->buf_pool_base == NULL) {
		goto err;
	}

	TAILQ_INIT(&accel_ch->task_pool);
	SLIST_INIT(&accel_ch->seq_pool);
	SLIST_INIT(&accel_ch->buf_pool);

	task_mem = accel_ch->task_pool_base;
	for (i = 0; i < g_opts.task_count; i++) {
		accel_task = (struct spdk_accel_task *)task_mem;
		TAILQ_INSERT_TAIL(&accel_ch->task_pool, accel_task, link);
		task_mem += g_max_accel_module_size;
	}
	for (i = 0; i < g_opts.sequence_count; i++) {
		seq = &accel_ch->seq_pool_base[i];
		SLIST_INSERT_HEAD(&accel_ch->seq_pool, seq, link);
	}
	for (i = 0; i < g_opts.buf_count; i++) {
		buf = &accel_ch->buf_pool_base[i];
		SLIST_INSERT_HEAD(&accel_ch->buf_pool, buf, link);
	}

	/* Assign modules and get IO channels for each */
	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
		accel_ch->module_ch[i] = g_modules_opc[i].module->get_io_channel();
		/* This can happen if idxd runs out of channels. */
		if (accel_ch->module_ch[i] == NULL) {
			SPDK_ERRLOG("Module %s failed to get io channel\n", g_modules_opc[i].module->name);
			goto err;
		}
	}

	if (g_accel_driver != NULL) {
		accel_ch->driver_channel = g_accel_driver->get_io_channel();
		if (accel_ch->driver_channel == NULL) {
			SPDK_ERRLOG("Failed to get driver's IO channel\n");
			goto err;
		}
	}

	rc = spdk_iobuf_channel_init(&accel_ch->iobuf, "accel", g_opts.small_cache_size,
				     g_opts.large_cache_size);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to initialize iobuf accel channel\n");
		goto err;
	}

	return 0;
err:
	if (accel_ch->driver_channel != NULL) {
		spdk_put_io_channel(accel_ch->driver_channel);
	}
	for (j = 0; j < i; j++) {
		spdk_put_io_channel(accel_ch->module_ch[j]);
	}
	free(accel_ch->task_pool_base);
	free(accel_ch->seq_pool_base);
	free(accel_ch->buf_pool_base);

	return -ENOMEM;
}
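/*
 * Layout note (illustrative diagram, not code from the tree): task_pool_base
 * above is a single allocation of task_count slots, each g_max_accel_module_size
 * bytes, so every struct spdk_accel_task is followed by enough room for the
 * largest per-task context reported by any module's get_ctx_size():
 *
 *	task_pool_base
 *	+-------------------+-------------------+-- ... --+
 *	| task 0 | mod ctx  | task 1 | mod ctx  |         |
 *	+-------------------+-------------------+-- ... --+
 *	<- g_max_accel_module_size ->
 */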
static void
accel_add_stats(struct accel_stats *total, struct accel_stats *stats)
{
	int i;

	total->sequence_executed += stats->sequence_executed;
	total->sequence_failed += stats->sequence_failed;
	total->retry.task += stats->retry.task;
	total->retry.sequence += stats->retry.sequence;
	total->retry.iobuf += stats->retry.iobuf;
	total->retry.bufdesc += stats->retry.bufdesc;
	for (i = 0; i < SPDK_ACCEL_OPC_LAST; ++i) {
		total->operations[i].executed += stats->operations[i].executed;
		total->operations[i].failed += stats->operations[i].failed;
		total->operations[i].num_bytes += stats->operations[i].num_bytes;
	}
}

/* Framework level channel destroy callback. */
static void
accel_destroy_channel(void *io_device, void *ctx_buf)
{
	struct accel_io_channel *accel_ch = ctx_buf;
	int i;

	spdk_iobuf_channel_fini(&accel_ch->iobuf);

	if (accel_ch->driver_channel != NULL) {
		spdk_put_io_channel(accel_ch->driver_channel);
	}

	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
		assert(accel_ch->module_ch[i] != NULL);
		spdk_put_io_channel(accel_ch->module_ch[i]);
		accel_ch->module_ch[i] = NULL;
	}

	/* Update global stats to make sure channel's stats aren't lost after a channel is gone */
	spdk_spin_lock(&g_stats_lock);
	accel_add_stats(&g_stats, &accel_ch->stats);
	spdk_spin_unlock(&g_stats_lock);

	free(accel_ch->task_pool_base);
	free(accel_ch->seq_pool_base);
	free(accel_ch->buf_pool_base);
}

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(&spdk_accel_module_list);
}

static int
accel_module_initialize(void)
{
	struct spdk_accel_module_if *accel_module, *tmp_module;
	int rc = 0, module_rc;

	TAILQ_FOREACH_SAFE(accel_module, &spdk_accel_module_list, tailq, tmp_module) {
		module_rc = accel_module->module_init();
		if (module_rc) {
			SPDK_ERRLOG("Module %s initialization failed with %d\n", accel_module->name, module_rc);
			TAILQ_REMOVE(&spdk_accel_module_list, accel_module, tailq);
			if (!rc) {
				rc = module_rc;
			}
			continue;
		}

		SPDK_DEBUGLOG(accel, "Module %s initialized.\n", accel_module->name);
	}

	return rc;
}

static void
accel_module_init_opcode(enum spdk_accel_opcode opcode)
{
	struct accel_module *module = &g_modules_opc[opcode];
	struct spdk_accel_module_if *module_if = module->module;

	if (module_if->get_memory_domains != NULL) {
		module->supports_memory_domains = module_if->get_memory_domains(NULL, 0) > 0;
	}
}
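/*
 * Example (illustrative sketch): a user thread obtains the framework channel via
 * spdk_accel_get_io_channel() above, submits work through it, and releases it
 * when done.  The actual submission calls are omitted; see include/spdk/accel.h
 * for the spdk_accel_submit_*() signatures in this SPDK revision:
 *
 *	struct spdk_io_channel *ch = spdk_accel_get_io_channel();
 *
 *	if (ch != NULL) {
 *		... submit work via the spdk_accel_submit_*() APIs ...
 *		spdk_put_io_channel(ch);
 *	}
 */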
int
spdk_accel_initialize(void)
{
	enum spdk_accel_opcode op;
	struct spdk_accel_module_if *accel_module = NULL;
	int rc;

	/*
	 * We need a unique identifier for the accel framework, so use the
	 * spdk_accel_module_list address for this purpose.
	 */
	spdk_io_device_register(&spdk_accel_module_list, accel_create_channel, accel_destroy_channel,
				sizeof(struct accel_io_channel), "accel");

	spdk_spin_init(&g_keyring_spin);
	spdk_spin_init(&g_stats_lock);

	rc = spdk_memory_domain_create(&g_accel_domain, SPDK_DMA_DEVICE_TYPE_ACCEL, NULL,
				       "SPDK_ACCEL_DMA_DEVICE");
	if (rc != 0) {
		SPDK_ERRLOG("Failed to create accel memory domain\n");
		return rc;
	}

	g_modules_started = true;
	rc = accel_module_initialize();
	if (rc) {
		return rc;
	}

	if (g_accel_driver != NULL && g_accel_driver->init != NULL) {
		rc = g_accel_driver->init();
		if (rc != 0) {
			SPDK_ERRLOG("Failed to initialize driver %s: %s\n", g_accel_driver->name,
				    spdk_strerror(-rc));
			return rc;
		}
	}

	/* The module list is ordered by priority, with the highest priority modules being at the
	 * end of the list.  The software module should be somewhere at the beginning of the list,
	 * before all HW modules.
	 * NOTE: all opcodes must be supported by software in the event that no HW modules are
	 * initialized to support the operation.
	 */
	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
			if (accel_module->supports_opcode(op)) {
				g_modules_opc[op].module = accel_module;
				SPDK_DEBUGLOG(accel, "OPC 0x%x now assigned to %s\n", op, accel_module->name);
			}
		}

		if (accel_module->get_ctx_size != NULL) {
			g_max_accel_module_size = spdk_max(g_max_accel_module_size,
							   accel_module->get_ctx_size());
		}
	}

	/* Now let's check for overrides and apply all that exist */
	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
		if (g_modules_opc_override[op] != NULL) {
			accel_module = _module_find_by_name(g_modules_opc_override[op]);
			if (accel_module == NULL) {
				SPDK_ERRLOG("Invalid module name of %s\n", g_modules_opc_override[op]);
				return -EINVAL;
			}
			if (accel_module->supports_opcode(op) == false) {
				SPDK_ERRLOG("Module %s does not support op code %d\n", accel_module->name, op);
				return -EINVAL;
			}
			g_modules_opc[op].module = accel_module;
		}
	}

	if (g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module != g_modules_opc[SPDK_ACCEL_OPC_DECRYPT].module) {
		SPDK_ERRLOG("Different accel modules are assigned to encrypt and decrypt operations\n");
		return -EINVAL;
	}

	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
		assert(g_modules_opc[op].module != NULL);
		accel_module_init_opcode(op);
	}

	rc = spdk_iobuf_register_module("accel");
	if (rc != 0) {
		SPDK_ERRLOG("Failed to register accel iobuf module\n");
		return rc;
	}

	return 0;
}
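/*
 * Worked example (hypothetical module names): if the registered list in
 * spdk_accel_initialize() above is [software, dsa] and both modules support
 * SPDK_ACCEL_OPC_COPY, the assignment loop leaves
 * g_modules_opc[SPDK_ACCEL_OPC_COPY].module pointing at dsa, since later
 * (higher priority) modules overwrite earlier assignments.  An explicit
 * override recorded as g_modules_opc_override[SPDK_ACCEL_OPC_COPY] = "software"
 * would then force the copy opcode back to the software module.
 */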
static void
accel_module_finish_cb(void)
{
	spdk_accel_fini_cb cb_fn = g_fini_cb_fn;

	cb_fn(g_fini_cb_arg);
	g_fini_cb_fn = NULL;
	g_fini_cb_arg = NULL;
}

static void
accel_write_overridden_opc(struct spdk_json_write_ctx *w, const char *opc_str,
			   const char *module_str)
{
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "accel_assign_opc");
	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_string(w, "opname", opc_str);
	spdk_json_write_named_string(w, "module", module_str);
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);
}

static void
__accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
{
	spdk_json_write_named_string(w, "name", key->param.key_name);
	spdk_json_write_named_string(w, "cipher", key->param.cipher);
	spdk_json_write_named_string(w, "key", key->param.hex_key);
	if (key->param.hex_key2) {
		spdk_json_write_named_string(w, "key2", key->param.hex_key2);
	}

	if (key->param.tweak_mode) {
		spdk_json_write_named_string(w, "tweak_mode", key->param.tweak_mode);
	}
}

void
_accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
{
	spdk_json_write_object_begin(w);
	__accel_crypto_key_dump_param(w, key);
	spdk_json_write_object_end(w);
}

static void
_accel_crypto_key_write_config_json(struct spdk_json_write_ctx *w,
				    struct spdk_accel_crypto_key *key)
{
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "accel_crypto_key_create");
	spdk_json_write_named_object_begin(w, "params");
	__accel_crypto_key_dump_param(w, key);
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);
}

static void
accel_write_options(struct spdk_json_write_ctx *w)
{
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "accel_set_options");
	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_uint32(w, "small_cache_size", g_opts.small_cache_size);
	spdk_json_write_named_uint32(w, "large_cache_size", g_opts.large_cache_size);
	spdk_json_write_named_uint32(w, "task_count", g_opts.task_count);
	spdk_json_write_named_uint32(w, "sequence_count", g_opts.sequence_count);
	spdk_json_write_named_uint32(w, "buf_count", g_opts.buf_count);
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);
}

static void
_accel_crypto_keys_write_config_json(struct spdk_json_write_ctx *w, bool full_dump)
{
	struct spdk_accel_crypto_key *key;

	spdk_spin_lock(&g_keyring_spin);
	TAILQ_FOREACH(key, &g_keyring, link) {
		if (full_dump) {
			_accel_crypto_key_write_config_json(w, key);
		} else {
			_accel_crypto_key_dump_param(w, key);
		}
	}
	spdk_spin_unlock(&g_keyring_spin);
}

void
_accel_crypto_keys_dump_param(struct spdk_json_write_ctx *w)
{
	_accel_crypto_keys_write_config_json(w, false);
}

void
spdk_accel_write_config_json(struct spdk_json_write_ctx *w)
{
	struct spdk_accel_module_if *accel_module;
	int i;

	spdk_json_write_array_begin(w);
	accel_write_options(w);

	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		if (accel_module->write_config_json) {
			accel_module->write_config_json(w);
		}
	}
	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
		if (g_modules_opc_override[i]) {
			accel_write_overridden_opc(w, g_opcode_strings[i],
						   g_modules_opc_override[i]);
		}
	}

	_accel_crypto_keys_write_config_json(w, true);

	spdk_json_write_array_end(w);
}
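/*
 * Example output (illustrative, with the default option values and one
 * hypothetical opcode override): spdk_accel_write_config_json() above emits an
 * array such as
 *
 *	[
 *	  {"method": "accel_set_options", "params": {"small_cache_size": 128,
 *	   "large_cache_size": 16, "task_count": 2048, "sequence_count": 2048,
 *	   "buf_count": 2048}},
 *	  {"method": "accel_assign_opc", "params": {"opname": "copy", "module": "software"}}
 *	]
 *
 * which the JSON-RPC config loader can replay at startup.
 */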
void
spdk_accel_module_finish(void)
{
	if (!g_accel_module) {
		g_accel_module = TAILQ_FIRST(&spdk_accel_module_list);
	} else {
		g_accel_module = TAILQ_NEXT(g_accel_module, tailq);
	}

	if (!g_accel_module) {
		if (g_accel_driver != NULL && g_accel_driver->fini != NULL) {
			g_accel_driver->fini();
		}

		spdk_spin_destroy(&g_keyring_spin);
		spdk_spin_destroy(&g_stats_lock);
		if (g_accel_domain) {
			spdk_memory_domain_destroy(g_accel_domain);
			g_accel_domain = NULL;
		}
		accel_module_finish_cb();
		return;
	}

	if (g_accel_module->module_fini) {
		spdk_thread_send_msg(spdk_get_thread(), g_accel_module->module_fini, NULL);
	} else {
		spdk_accel_module_finish();
	}
}

static void
accel_io_device_unregister_cb(void *io_device)
{
	struct spdk_accel_crypto_key *key, *key_tmp;
	enum spdk_accel_opcode op;

	spdk_spin_lock(&g_keyring_spin);
	TAILQ_FOREACH_SAFE(key, &g_keyring, link, key_tmp) {
		accel_crypto_key_destroy_unsafe(key);
	}
	spdk_spin_unlock(&g_keyring_spin);

	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
		if (g_modules_opc_override[op] != NULL) {
			free(g_modules_opc_override[op]);
			g_modules_opc_override[op] = NULL;
		}
		g_modules_opc[op].module = NULL;
	}

	spdk_accel_module_finish();
}

void
spdk_accel_finish(spdk_accel_fini_cb cb_fn, void *cb_arg)
{
	assert(cb_fn != NULL);

	g_fini_cb_fn = cb_fn;
	g_fini_cb_arg = cb_arg;

	spdk_io_device_unregister(&spdk_accel_module_list, accel_io_device_unregister_cb);
}

static struct spdk_accel_driver *
accel_find_driver(const char *name)
{
	struct spdk_accel_driver *driver;

	TAILQ_FOREACH(driver, &g_accel_drivers, tailq) {
		if (strcmp(driver->name, name) == 0) {
			return driver;
		}
	}

	return NULL;
}

int
spdk_accel_set_driver(const char *name)
{
	struct spdk_accel_driver *driver;

	driver = accel_find_driver(name);
	if (driver == NULL) {
		SPDK_ERRLOG("Couldn't find driver named '%s'\n", name);
		return -ENODEV;
	}

	g_accel_driver = driver;

	return 0;
}

void
spdk_accel_driver_register(struct spdk_accel_driver *driver)
{
	if (accel_find_driver(driver->name)) {
		SPDK_ERRLOG("Driver named '%s' has already been registered\n", driver->name);
		assert(0);
		return;
	}

	TAILQ_INSERT_TAIL(&g_accel_drivers, driver, tailq);
}

int
spdk_accel_set_opts(const struct spdk_accel_opts *opts)
{
	if (opts->size > sizeof(*opts)) {
		return -EINVAL;
	}

	memcpy(&g_opts, opts, opts->size);

	return 0;
}

void
spdk_accel_get_opts(struct spdk_accel_opts *opts)
{
	size_t size = opts->size;

	assert(size <= sizeof(*opts));

	memcpy(opts, &g_opts, spdk_min(sizeof(*opts), size));
	opts->size = size;
}
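/*
 * Example (illustrative sketch): the size field makes the opts struct forward
 * and backward compatible.  A typical read-modify-write of the global options:
 *
 *	struct spdk_accel_opts opts = { .size = sizeof(opts) };
 *
 *	spdk_accel_get_opts(&opts);
 *	opts.task_count = 4096;		// hypothetical new value
 *	rc = spdk_accel_set_opts(&opts);
 *
 * Only the first opts.size bytes are copied in either direction, so a caller
 * built against an older, smaller version of the struct still works.
 */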
struct accel_get_stats_ctx {
	struct accel_stats stats;
	accel_get_stats_cb cb_fn;
	void *cb_arg;
};

static void
accel_get_channel_stats_done(struct spdk_io_channel_iter *iter, int status)
{
	struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);

	ctx->cb_fn(&ctx->stats, ctx->cb_arg);
	free(ctx);
}

static void
accel_get_channel_stats(struct spdk_io_channel_iter *iter)
{
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(iter);
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);

	accel_add_stats(&ctx->stats, &accel_ch->stats);
	spdk_for_each_channel_continue(iter, 0);
}

int
accel_get_stats(accel_get_stats_cb cb_fn, void *cb_arg)
{
	struct accel_get_stats_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		return -ENOMEM;
	}

	spdk_spin_lock(&g_stats_lock);
	accel_add_stats(&ctx->stats, &g_stats);
	spdk_spin_unlock(&g_stats_lock);

	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	spdk_for_each_channel(&spdk_accel_module_list, accel_get_channel_stats, ctx,
			      accel_get_channel_stats_done);

	return 0;
}

void
spdk_accel_get_opcode_stats(struct spdk_io_channel *ch, enum spdk_accel_opcode opcode,
			    struct spdk_accel_opcode_stats *stats, size_t size)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);

#define FIELD_OK(field) \
	offsetof(struct spdk_accel_opcode_stats, field) + sizeof(stats->field) <= size

#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		stats->field = value; \
	}

	SET_FIELD(executed, accel_ch->stats.operations[opcode].executed);
	SET_FIELD(failed, accel_ch->stats.operations[opcode].failed);
	SET_FIELD(num_bytes, accel_ch->stats.operations[opcode].num_bytes);

#undef FIELD_OK
#undef SET_FIELD
}

uint8_t
spdk_accel_get_buf_align(enum spdk_accel_opcode opcode,
			 const struct spdk_accel_operation_exec_ctx *ctx)
{
	struct spdk_accel_module_if *module = g_modules_opc[opcode].module;
	struct spdk_accel_opcode_info modinfo = {}, drvinfo = {};

	if (g_accel_driver != NULL && g_accel_driver->get_operation_info != NULL) {
		g_accel_driver->get_operation_info(opcode, ctx, &drvinfo);
	}

	if (module->get_operation_info != NULL) {
		module->get_operation_info(opcode, ctx, &modinfo);
	}

	/* If a driver is set, it'll execute most of the operations, while the rest will usually
	 * fall back to accel_sw, which doesn't have any alignment requirements.  However, to be
	 * extra safe, return the max(driver, module) if a driver delegates some operations to a
	 * hardware module. */
	return spdk_max(modinfo.required_alignment, drvinfo.required_alignment);
}

struct spdk_accel_module_if *
spdk_accel_get_module(const char *name)
{
	struct spdk_accel_module_if *module;

	TAILQ_FOREACH(module, &spdk_accel_module_list, tailq) {
		if (strcmp(module->name, name) == 0) {
			return module;
		}
	}

	return NULL;
}

SPDK_LOG_REGISTER_COMPONENT(accel)