/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Intel Corporation.
 * Copyright (c) 2022, 2023 NVIDIA CORPORATION & AFFILIATES.
 * All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/accel_module.h"

#include "accel_internal.h"

#include "spdk/dma.h"
#include "spdk/env.h"
#include "spdk/likely.h"
#include "spdk/log.h"
#include "spdk/thread.h"
#include "spdk/json.h"
#include "spdk/crc32.h"
#include "spdk/util.h"
#include "spdk/hexlify.h"

/* Accelerator Framework: The following provides a top level
 * generic API for the accelerator functions defined here. Modules,
 * such as the one in /module/accel/ioat, supply the implementation
 * with the exception of the pure software implementation contained
 * later in this file.
 */

#define ALIGN_4K			0x1000
#define MAX_TASKS_PER_CHANNEL		0x800
#define ACCEL_SMALL_CACHE_SIZE		128
#define ACCEL_LARGE_CACHE_SIZE		16
/* Set MSB, so we don't return NULL pointers as buffers */
#define ACCEL_BUFFER_BASE		((void *)(1ull << 63))
#define ACCEL_BUFFER_OFFSET_MASK	((uintptr_t)ACCEL_BUFFER_BASE - 1)

#define ACCEL_CRYPTO_TWEAK_MODE_DEFAULT	SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA

struct accel_module {
	struct spdk_accel_module_if	*module;
	bool				supports_memory_domains;
};

/* Largest context size for all accel modules */
static size_t g_max_accel_module_size = sizeof(struct spdk_accel_task);

static struct spdk_accel_module_if *g_accel_module = NULL;
static spdk_accel_fini_cb g_fini_cb_fn = NULL;
static void *g_fini_cb_arg = NULL;
static bool g_modules_started = false;
static struct spdk_memory_domain *g_accel_domain;

/* Global list of registered accelerator modules */
static TAILQ_HEAD(, spdk_accel_module_if) spdk_accel_module_list =
	TAILQ_HEAD_INITIALIZER(spdk_accel_module_list);

/* Crypto keyring */
static TAILQ_HEAD(, spdk_accel_crypto_key) g_keyring = TAILQ_HEAD_INITIALIZER(g_keyring);
static struct spdk_spinlock g_keyring_spin;

/* Global array mapping capabilities to modules */
static struct accel_module g_modules_opc[SPDK_ACCEL_OPC_LAST] = {};
static char *g_modules_opc_override[SPDK_ACCEL_OPC_LAST] = {};
TAILQ_HEAD(, spdk_accel_driver) g_accel_drivers = TAILQ_HEAD_INITIALIZER(g_accel_drivers);
static struct spdk_accel_driver *g_accel_driver;
static struct spdk_accel_opts g_opts = {
	.small_cache_size = ACCEL_SMALL_CACHE_SIZE,
	.large_cache_size = ACCEL_LARGE_CACHE_SIZE,
	.task_count = MAX_TASKS_PER_CHANNEL,
	.sequence_count = MAX_TASKS_PER_CHANNEL,
	.buf_count = MAX_TASKS_PER_CHANNEL,
};
static struct accel_stats g_stats;
static struct spdk_spinlock g_stats_lock;

static const char *g_opcode_strings[SPDK_ACCEL_OPC_LAST] = {
	"copy", "fill", "dualcast", "compare", "crc32c", "copy_crc32c",
	"compress", "decompress", "encrypt", "decrypt", "xor"
};
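/*
 * Example (illustrative sketch, not part of this file): a typical consumer of
 * the generic API above obtains an accel channel and submits an operation;
 * the module registered for the opcode performs the actual work. The buffers
 * and callback below are hypothetical.
 *
 *	static void copy_done(void *cb_arg, int status) { ... }
 *
 *	struct spdk_io_channel *ch = spdk_accel_get_io_channel();
 *	rc = spdk_accel_submit_copy(ch, dst, src, nbytes, 0, copy_done, NULL);
 *	if (rc == -ENOMEM) { ... retry later ... }
 */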
enum accel_sequence_state {
	ACCEL_SEQUENCE_STATE_INIT,
	ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF,
	ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF,
	ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF,
	ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF,
	ACCEL_SEQUENCE_STATE_PULL_DATA,
	ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA,
	ACCEL_SEQUENCE_STATE_EXEC_TASK,
	ACCEL_SEQUENCE_STATE_AWAIT_TASK,
	ACCEL_SEQUENCE_STATE_COMPLETE_TASK,
	ACCEL_SEQUENCE_STATE_NEXT_TASK,
	ACCEL_SEQUENCE_STATE_PUSH_DATA,
	ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA,
	ACCEL_SEQUENCE_STATE_DRIVER_EXEC,
	ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK,
	ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE,
	ACCEL_SEQUENCE_STATE_ERROR,
	ACCEL_SEQUENCE_STATE_MAX,
};

static const char *g_seq_states[]
__attribute__((unused)) = {
	[ACCEL_SEQUENCE_STATE_INIT] = "init",
	[ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF] = "check-virtbuf",
	[ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF] = "await-virtbuf",
	[ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF] = "check-bouncebuf",
	[ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF] = "await-bouncebuf",
	[ACCEL_SEQUENCE_STATE_PULL_DATA] = "pull-data",
	[ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA] = "await-pull-data",
	[ACCEL_SEQUENCE_STATE_EXEC_TASK] = "exec-task",
	[ACCEL_SEQUENCE_STATE_AWAIT_TASK] = "await-task",
	[ACCEL_SEQUENCE_STATE_COMPLETE_TASK] = "complete-task",
	[ACCEL_SEQUENCE_STATE_NEXT_TASK] = "next-task",
	[ACCEL_SEQUENCE_STATE_PUSH_DATA] = "push-data",
	[ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA] = "await-push-data",
	[ACCEL_SEQUENCE_STATE_DRIVER_EXEC] = "driver-exec",
	[ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK] = "driver-await-task",
	[ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE] = "driver-complete",
	[ACCEL_SEQUENCE_STATE_ERROR] = "error",
	[ACCEL_SEQUENCE_STATE_MAX] = "",
};

#define ACCEL_SEQUENCE_STATE_STRING(s) \
	(((s) >= ACCEL_SEQUENCE_STATE_INIT && (s) < ACCEL_SEQUENCE_STATE_MAX) \
	 ? g_seq_states[s] : "unknown")

struct accel_buffer {
	struct spdk_accel_sequence	*seq;
	void				*buf;
	uint64_t			len;
	struct spdk_iobuf_entry		iobuf;
	spdk_accel_sequence_get_buf_cb	cb_fn;
	void				*cb_ctx;
	TAILQ_ENTRY(accel_buffer)	link;
};

struct accel_io_channel {
	struct spdk_io_channel			*module_ch[SPDK_ACCEL_OPC_LAST];
	struct spdk_io_channel			*driver_channel;
	void					*task_pool_base;
	struct spdk_accel_sequence		*seq_pool_base;
	struct accel_buffer			*buf_pool_base;
	TAILQ_HEAD(, spdk_accel_task)		task_pool;
	TAILQ_HEAD(, spdk_accel_sequence)	seq_pool;
	TAILQ_HEAD(, accel_buffer)		buf_pool;
	struct spdk_iobuf_channel		iobuf;
	struct accel_stats			stats;
};

TAILQ_HEAD(accel_sequence_tasks, spdk_accel_task);

struct spdk_accel_sequence {
	struct accel_io_channel			*ch;
	struct accel_sequence_tasks		tasks;
	struct accel_sequence_tasks		completed;
	TAILQ_HEAD(, accel_buffer)		bounce_bufs;
	int					status;
	/* state uses enum accel_sequence_state */
	uint8_t					state;
	bool					in_process_sequence;
	spdk_accel_completion_cb		cb_fn;
	void					*cb_arg;
	TAILQ_ENTRY(spdk_accel_sequence)	link;
};

#define accel_update_stats(ch, event, v) \
	do { \
		(ch)->stats.event += (v); \
	} while (0)

#define accel_update_task_stats(ch, task, event, v) \
	accel_update_stats(ch, operations[(task)->op_code].event, v)

static inline void
accel_sequence_set_state(struct spdk_accel_sequence *seq, enum accel_sequence_state state)
{
	SPDK_DEBUGLOG(accel, "seq=%p, setting state: %s -> %s\n", seq,
		      ACCEL_SEQUENCE_STATE_STRING(seq->state), ACCEL_SEQUENCE_STATE_STRING(state));
	assert(seq->state != ACCEL_SEQUENCE_STATE_ERROR || state == ACCEL_SEQUENCE_STATE_ERROR);
	seq->state = state;
}

static void
accel_sequence_set_fail(struct spdk_accel_sequence *seq, int status)
{
	accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
	assert(status != 0);
	seq->status = status;
}
int
spdk_accel_get_opc_module_name(enum spdk_accel_opcode opcode, const char **module_name)
{
	if (opcode >= SPDK_ACCEL_OPC_LAST) {
		/* invalid opcode */
		return -EINVAL;
	}

	if (g_modules_opc[opcode].module) {
		*module_name = g_modules_opc[opcode].module->name;
	} else {
		return -ENOENT;
	}

	return 0;
}

void
_accel_for_each_module(struct module_info *info, _accel_for_each_module_fn fn)
{
	struct spdk_accel_module_if *accel_module;
	enum spdk_accel_opcode opcode;
	int j = 0;

	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		for (opcode = 0; opcode < SPDK_ACCEL_OPC_LAST; opcode++) {
			if (accel_module->supports_opcode(opcode)) {
				info->ops[j] = opcode;
				j++;
			}
		}
		info->name = accel_module->name;
		info->num_ops = j;
		fn(info);
		j = 0;
	}
}

const char *
spdk_accel_get_opcode_name(enum spdk_accel_opcode opcode)
{
	if (opcode < SPDK_ACCEL_OPC_LAST) {
		return g_opcode_strings[opcode];
	}

	return NULL;
}

int
spdk_accel_assign_opc(enum spdk_accel_opcode opcode, const char *name)
{
	char *copy;

	if (g_modules_started == true) {
		/* we don't allow re-assignment once things have started */
		return -EINVAL;
	}

	if (opcode >= SPDK_ACCEL_OPC_LAST) {
		/* invalid opcode */
		return -EINVAL;
	}

	copy = strdup(name);
	if (copy == NULL) {
		return -ENOMEM;
	}

	/* module selection will be validated after the framework starts. */
	g_modules_opc_override[opcode] = copy;

	return 0;
}

void
spdk_accel_task_complete(struct spdk_accel_task *accel_task, int status)
{
	struct accel_io_channel *accel_ch = accel_task->accel_ch;
	spdk_accel_completion_cb cb_fn = accel_task->cb_fn;
	void *cb_arg = accel_task->cb_arg;

	/* Return the task to the pool first, so that the pool isn't exhausted
	 * if the user's callback (cb_fn) recursively allocates another
	 * accel_task.
	 */
	TAILQ_INSERT_HEAD(&accel_ch->task_pool, accel_task, link);
	accel_task->seq = NULL;

	accel_update_task_stats(accel_ch, accel_task, executed, 1);
	accel_update_task_stats(accel_ch, accel_task, num_bytes, accel_task->nbytes);
	if (spdk_unlikely(status != 0)) {
		accel_update_task_stats(accel_ch, accel_task, failed, 1);
	}

	cb_fn(cb_arg, status);
}

inline static struct spdk_accel_task *
_get_task(struct accel_io_channel *accel_ch, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct spdk_accel_task *accel_task;

	accel_task = TAILQ_FIRST(&accel_ch->task_pool);
	if (spdk_unlikely(accel_task == NULL)) {
		accel_update_stats(accel_ch, retry.task, 1);
		return NULL;
	}

	TAILQ_REMOVE(&accel_ch->task_pool, accel_task, link);
	accel_task->link.tqe_next = NULL;
	accel_task->link.tqe_prev = NULL;

	accel_task->cb_fn = cb_fn;
	accel_task->cb_arg = cb_arg;
	accel_task->accel_ch = accel_ch;
	accel_task->s.iovs = NULL;
	accel_task->d.iovs = NULL;

	return accel_task;
}

static inline int
accel_submit_task(struct accel_io_channel *accel_ch, struct spdk_accel_task *task)
{
	struct spdk_io_channel *module_ch = accel_ch->module_ch[task->op_code];
	struct spdk_accel_module_if *module = g_modules_opc[task->op_code].module;
	int rc;

	rc = module->submit_tasks(module_ch, task);
	if (spdk_unlikely(rc != 0)) {
		accel_update_task_stats(accel_ch, task, failed, 1);
	}

	return rc;
}
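/*
 * Example (illustrative sketch): spdk_accel_assign_opc() above is typically
 * called from application/RPC initialization code before the framework
 * starts; the assignment is only validated once modules have registered. The
 * module name "software" is used here purely as an illustration.
 *
 *	rc = spdk_accel_assign_opc(SPDK_ACCEL_OPC_CRC32C, "software");
 */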
static inline uint64_t
accel_get_iovlen(struct iovec *iovs, uint32_t iovcnt)
{
	uint64_t result = 0;
	uint32_t i;

	for (i = 0; i < iovcnt; ++i) {
		result += iovs[i].iov_len;
	}

	return result;
}

/* Accel framework public API for copy function */
int
spdk_accel_submit_copy(struct spdk_io_channel *ch, void *dst, void *src,
		       uint64_t nbytes, int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->op_code = SPDK_ACCEL_OPC_COPY;
	accel_task->flags = flags;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for dual cast copy function */
int
spdk_accel_submit_dualcast(struct spdk_io_channel *ch, void *dst1,
			   void *dst2, void *src, uint64_t nbytes, int flags,
			   spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	if ((uintptr_t)dst1 & (ALIGN_4K - 1) || (uintptr_t)dst2 & (ALIGN_4K - 1)) {
		SPDK_ERRLOG("Dualcast requires 4K alignment on dst addresses\n");
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d2.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST2];
	accel_task->d.iovs[0].iov_base = dst1;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->d2.iovs[0].iov_base = dst2;
	accel_task->d2.iovs[0].iov_len = nbytes;
	accel_task->d2.iovcnt = 1;
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->flags = flags;
	accel_task->op_code = SPDK_ACCEL_OPC_DUALCAST;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}
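/*
 * Example (illustrative sketch): both destinations of a dualcast must be
 * 4 KiB aligned, so they are typically allocated with an explicit alignment,
 * e.g. via spdk_dma_zmalloc(). The names below are hypothetical.
 *
 *	void *dst1 = spdk_dma_zmalloc(nbytes, ALIGN_4K, NULL);
 *	void *dst2 = spdk_dma_zmalloc(nbytes, ALIGN_4K, NULL);
 *	rc = spdk_accel_submit_dualcast(ch, dst1, dst2, src, nbytes, 0,
 *					cb_fn, cb_arg);
 */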
/* Accel framework public API for compare function */
int
spdk_accel_submit_compare(struct spdk_io_channel *ch, void *src1,
			  void *src2, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
			  void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->s2.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC2];
	accel_task->s.iovs[0].iov_base = src1;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->s2.iovs[0].iov_base = src2;
	accel_task->s2.iovs[0].iov_len = nbytes;
	accel_task->s2.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->op_code = SPDK_ACCEL_OPC_COMPARE;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for fill function */
int
spdk_accel_submit_fill(struct spdk_io_channel *ch, void *dst,
		       uint8_t fill, uint64_t nbytes, int flags,
		       spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->nbytes = nbytes;
	memset(&accel_task->fill_pattern, fill, sizeof(uint64_t));
	accel_task->flags = flags;
	accel_task->op_code = SPDK_ACCEL_OPC_FILL;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for CRC-32C function */
int
spdk_accel_submit_crc32c(struct spdk_io_channel *ch, uint32_t *crc_dst,
			 void *src, uint32_t seed, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
			 void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->op_code = SPDK_ACCEL_OPC_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}
/* Accel framework public API for chained CRC-32C function */
int
spdk_accel_submit_crc32cv(struct spdk_io_channel *ch, uint32_t *crc_dst,
			  struct iovec *iov, uint32_t iov_cnt, uint32_t seed,
			  spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	if (iov == NULL) {
		SPDK_ERRLOG("iov should not be NULL\n");
		return -EINVAL;
	}

	if (!iov_cnt) {
		SPDK_ERRLOG("iovcnt should not be zero value\n");
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		SPDK_ERRLOG("no memory\n");
		assert(0);
		return -ENOMEM;
	}

	accel_task->s.iovs = iov;
	accel_task->s.iovcnt = iov_cnt;
	accel_task->nbytes = accel_get_iovlen(iov, iov_cnt);
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->op_code = SPDK_ACCEL_OPC_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for copy with CRC-32C function */
int
spdk_accel_submit_copy_crc32c(struct spdk_io_channel *ch, void *dst,
			      void *src, uint32_t *crc_dst, uint32_t seed, uint64_t nbytes,
			      int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->flags = flags;
	accel_task->op_code = SPDK_ACCEL_OPC_COPY_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for chained copy + CRC-32C function */
int
spdk_accel_submit_copy_crc32cv(struct spdk_io_channel *ch, void *dst,
			       struct iovec *src_iovs, uint32_t iov_cnt, uint32_t *crc_dst,
			       uint32_t seed, int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	uint64_t nbytes;

	if (src_iovs == NULL) {
		SPDK_ERRLOG("iov should not be NULL\n");
		return -EINVAL;
	}

	if (!iov_cnt) {
		SPDK_ERRLOG("iovcnt should not be zero value\n");
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		SPDK_ERRLOG("no memory\n");
		assert(0);
		return -ENOMEM;
	}

	nbytes = accel_get_iovlen(src_iovs, iov_cnt);
	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = iov_cnt;
	accel_task->nbytes = nbytes;
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->flags = flags;
	accel_task->op_code = SPDK_ACCEL_OPC_COPY_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}
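/*
 * Example (illustrative sketch): the chained variants above take an iovec
 * array and derive the total length via accel_get_iovlen(). The two-segment
 * layout below is hypothetical.
 *
 *	struct iovec iovs[2] = {
 *		{ .iov_base = hdr, .iov_len = hdr_len },
 *		{ .iov_base = payload, .iov_len = payload_len },
 *	};
 *	rc = spdk_accel_submit_crc32cv(ch, &crc, iovs, 2, 0, cb_fn, cb_arg);
 */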
int
spdk_accel_submit_compress(struct spdk_io_channel *ch, void *dst, uint64_t nbytes,
			   struct iovec *src_iovs, size_t src_iovcnt, uint32_t *output_size, int flags,
			   spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->output_size = output_size;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->nbytes = nbytes;
	accel_task->flags = flags;
	accel_task->op_code = SPDK_ACCEL_OPC_COMPRESS;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_decompress(struct spdk_io_channel *ch, struct iovec *dst_iovs,
			     size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
			     uint32_t *output_size, int flags, spdk_accel_completion_cb cb_fn,
			     void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->output_size = output_size;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	accel_task->flags = flags;
	accel_task->op_code = SPDK_ACCEL_OPC_DECOMPRESS;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_encrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  uint64_t iv, uint32_t block_size, int flags,
			  spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->crypto_key = key;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	accel_task->iv = iv;
	accel_task->block_size = block_size;
	accel_task->flags = flags;
	accel_task->op_code = SPDK_ACCEL_OPC_ENCRYPT;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}
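/*
 * Example (illustrative sketch): with the default tweak mode
 * (SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA, see ACCEL_CRYPTO_TWEAK_MODE_DEFAULT
 * above), callers commonly pass the starting LBA of the I/O as the iv and the
 * volume's block size as block_size. The variables below are hypothetical.
 *
 *	rc = spdk_accel_submit_encrypt(ch, key, dst_iovs, dst_iovcnt,
 *				       src_iovs, src_iovcnt,
 *				       lba, 512, 0, cb_fn, cb_arg);
 */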
int
spdk_accel_submit_decrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  uint64_t iv, uint32_t block_size, int flags,
			  spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->crypto_key = key;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	accel_task->iv = iv;
	accel_task->block_size = block_size;
	accel_task->flags = flags;
	accel_task->op_code = SPDK_ACCEL_OPC_DECRYPT;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_xor(struct spdk_io_channel *ch, void *dst, void **sources, uint32_t nsrcs,
		      uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->nsrcs.srcs = sources;
	accel_task->nsrcs.cnt = nsrcs;
	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->op_code = SPDK_ACCEL_OPC_XOR;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

static inline struct accel_buffer *
accel_get_buf(struct accel_io_channel *ch, uint64_t len)
{
	struct accel_buffer *buf;

	buf = TAILQ_FIRST(&ch->buf_pool);
	if (spdk_unlikely(buf == NULL)) {
		accel_update_stats(ch, retry.bufdesc, 1);
		return NULL;
	}

	TAILQ_REMOVE(&ch->buf_pool, buf, link);
	buf->len = len;
	buf->buf = NULL;
	buf->seq = NULL;
	buf->cb_fn = NULL;

	return buf;
}

static inline void
accel_put_buf(struct accel_io_channel *ch, struct accel_buffer *buf)
{
	if (buf->buf != NULL) {
		spdk_iobuf_put(&ch->iobuf, buf->buf, buf->len);
	}

	TAILQ_INSERT_HEAD(&ch->buf_pool, buf, link);
}

static inline struct spdk_accel_sequence *
accel_sequence_get(struct accel_io_channel *ch)
{
	struct spdk_accel_sequence *seq;

	seq = TAILQ_FIRST(&ch->seq_pool);
	if (spdk_unlikely(seq == NULL)) {
		accel_update_stats(ch, retry.sequence, 1);
		return NULL;
	}

	TAILQ_REMOVE(&ch->seq_pool, seq, link);

	TAILQ_INIT(&seq->tasks);
	TAILQ_INIT(&seq->completed);
	TAILQ_INIT(&seq->bounce_bufs);

	seq->ch = ch;
	seq->status = 0;
	seq->state = ACCEL_SEQUENCE_STATE_INIT;
	seq->in_process_sequence = false;

	return seq;
}

static inline void
accel_sequence_put(struct spdk_accel_sequence *seq)
{
	struct accel_io_channel *ch = seq->ch;
	struct accel_buffer *buf;

	while (!TAILQ_EMPTY(&seq->bounce_bufs)) {
		buf = TAILQ_FIRST(&seq->bounce_bufs);
		TAILQ_REMOVE(&seq->bounce_bufs, buf, link);
		accel_put_buf(seq->ch, buf);
	}

	assert(TAILQ_EMPTY(&seq->tasks));
	assert(TAILQ_EMPTY(&seq->completed));
	seq->ch = NULL;

	TAILQ_INSERT_HEAD(&ch->seq_pool, seq, link);
}

static void accel_sequence_task_cb(void *cb_arg, int status);

static inline struct spdk_accel_task *
accel_sequence_get_task(struct accel_io_channel *ch, struct spdk_accel_sequence *seq,
			spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct spdk_accel_task *task;

	task = _get_task(ch, accel_sequence_task_cb, seq);
	if (spdk_unlikely(task == NULL)) {
		return task;
	}

	task->step_cb_fn = cb_fn;
	task->step_cb_arg = cb_arg;
	task->seq = seq;

	return task;
}
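/*
 * Example (illustrative sketch): the append API below builds a chain of
 * operations that only executes once spdk_accel_sequence_finish() is called.
 * Passing a NULL sequence pointer to the first append allocates one. The
 * buffers, counts and callbacks below are hypothetical.
 *
 *	struct spdk_accel_sequence *seq = NULL;
 *
 *	rc = spdk_accel_append_fill(&seq, ch, buf, len, NULL, NULL, 0xa5, 0,
 *				    step_done, NULL);
 *	rc = spdk_accel_append_copy(&seq, ch, dst_iovs, 1, NULL, NULL,
 *				    src_iovs, 1, NULL, NULL, 0, step_done, NULL);
 *	spdk_accel_sequence_finish(seq, seq_done, NULL);
 */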
int
spdk_accel_append_copy(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
		       struct iovec *dst_iovs, uint32_t dst_iovcnt,
		       struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
		       struct iovec *src_iovs, uint32_t src_iovcnt,
		       struct spdk_memory_domain *src_domain, void *src_domain_ctx,
		       int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	task->flags = flags;
	task->op_code = SPDK_ACCEL_OPC_COPY;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_append_fill(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
		       void *buf, uint64_t len,
		       struct spdk_memory_domain *domain, void *domain_ctx, uint8_t pattern,
		       int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	memset(&task->fill_pattern, pattern, sizeof(uint64_t));

	task->d.iovs = &task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	task->d.iovs[0].iov_base = buf;
	task->d.iovs[0].iov_len = len;
	task->d.iovcnt = 1;
	task->nbytes = len;
	task->src_domain = NULL;
	task->dst_domain = domain;
	task->dst_domain_ctx = domain_ctx;
	task->flags = flags;
	task->op_code = SPDK_ACCEL_OPC_FILL;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}
int
spdk_accel_append_decompress(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
			     struct iovec *dst_iovs, size_t dst_iovcnt,
			     struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			     struct iovec *src_iovs, size_t src_iovcnt,
			     struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			     int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	/* TODO: support output_size for chaining */
	task->output_size = NULL;
	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	task->flags = flags;
	task->op_code = SPDK_ACCEL_OPC_DECOMPRESS;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_append_encrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
			  struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			  uint64_t iv, uint32_t block_size, int flags,
			  spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	assert(dst_iovs && dst_iovcnt && src_iovs && src_iovcnt && key && block_size);

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	task->crypto_key = key;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	task->iv = iv;
	task->block_size = block_size;
	task->flags = flags;
	task->op_code = SPDK_ACCEL_OPC_ENCRYPT;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}
int
spdk_accel_append_decrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
			  struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			  uint64_t iv, uint32_t block_size, int flags,
			  spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	assert(dst_iovs && dst_iovcnt && src_iovs && src_iovcnt && key && block_size);

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	task->crypto_key = key;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	task->iv = iv;
	task->block_size = block_size;
	task->flags = flags;
	task->op_code = SPDK_ACCEL_OPC_DECRYPT;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_append_crc32c(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
			 uint32_t *dst, struct iovec *iovs, uint32_t iovcnt,
			 struct spdk_memory_domain *domain, void *domain_ctx,
			 uint32_t seed, spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	task->s.iovs = iovs;
	task->s.iovcnt = iovcnt;
	task->src_domain = domain;
	task->src_domain_ctx = domain_ctx;
	task->nbytes = accel_get_iovlen(iovs, iovcnt);
	task->crc_dst = dst;
	task->seed = seed;
	task->op_code = SPDK_ACCEL_OPC_CRC32C;
	task->dst_domain = NULL;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_get_buf(struct spdk_io_channel *ch, uint64_t len, void **buf,
		   struct spdk_memory_domain **domain, void **domain_ctx)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct accel_buffer *accel_buf;

	accel_buf = accel_get_buf(accel_ch, len);
	if (spdk_unlikely(accel_buf == NULL)) {
		return -ENOMEM;
	}

	/* We always return the same pointer and identify the buffers through domain_ctx */
	*buf = ACCEL_BUFFER_BASE;
	*domain_ctx = accel_buf;
	*domain = g_accel_domain;

	return 0;
}

void
spdk_accel_put_buf(struct spdk_io_channel *ch, void *buf,
		   struct spdk_memory_domain *domain, void *domain_ctx)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct accel_buffer *accel_buf = domain_ctx;

	assert(domain == g_accel_domain);
	assert(buf == ACCEL_BUFFER_BASE);

	accel_put_buf(accel_ch, accel_buf);
}
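/*
 * Example (illustrative sketch): spdk_accel_get_buf() above hands out an
 * opaque pointer in the accel memory domain; real memory is only allocated
 * when the sequence executes. Such a buffer can serve as scratch space
 * between two chained operations. The names below are hypothetical.
 *
 *	void *scratch;
 *	struct spdk_memory_domain *domain;
 *	void *domain_ctx;
 *
 *	rc = spdk_accel_get_buf(ch, len, &scratch, &domain, &domain_ctx);
 *	... append operations using scratch + domain/domain_ctx ...
 *	spdk_accel_put_buf(ch, scratch, domain, domain_ctx);
 */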
static void
accel_sequence_complete_tasks(struct spdk_accel_sequence *seq)
{
	struct spdk_accel_task *task;
	struct accel_io_channel *ch = seq->ch;
	spdk_accel_step_cb cb_fn;
	void *cb_arg;

	while (!TAILQ_EMPTY(&seq->completed)) {
		task = TAILQ_FIRST(&seq->completed);
		TAILQ_REMOVE(&seq->completed, task, seq_link);
		cb_fn = task->step_cb_fn;
		cb_arg = task->step_cb_arg;
		TAILQ_INSERT_HEAD(&ch->task_pool, task, link);
		if (cb_fn != NULL) {
			cb_fn(cb_arg);
		}
	}

	while (!TAILQ_EMPTY(&seq->tasks)) {
		task = TAILQ_FIRST(&seq->tasks);
		TAILQ_REMOVE(&seq->tasks, task, seq_link);
		cb_fn = task->step_cb_fn;
		cb_arg = task->step_cb_arg;
		TAILQ_INSERT_HEAD(&ch->task_pool, task, link);
		if (cb_fn != NULL) {
			cb_fn(cb_arg);
		}
	}
}

static void
accel_sequence_complete(struct spdk_accel_sequence *seq)
{
	SPDK_DEBUGLOG(accel, "Completed sequence: %p with status: %d\n", seq, seq->status);

	accel_update_stats(seq->ch, sequence_executed, 1);
	if (spdk_unlikely(seq->status != 0)) {
		accel_update_stats(seq->ch, sequence_failed, 1);
	}

	/* First notify all users that appended operations to this sequence */
	accel_sequence_complete_tasks(seq);

	/* Then notify the user that finished the sequence */
	seq->cb_fn(seq->cb_arg, seq->status);

	accel_sequence_put(seq);
}

static void
accel_update_virt_iov(struct iovec *diov, struct iovec *siov, struct accel_buffer *accel_buf)
{
	uintptr_t offset;

	offset = (uintptr_t)siov->iov_base & ACCEL_BUFFER_OFFSET_MASK;
	assert(offset < accel_buf->len);

	diov->iov_base = (char *)accel_buf->buf + offset;
	diov->iov_len = siov->iov_len;
}

static void
accel_sequence_set_virtbuf(struct spdk_accel_sequence *seq, struct accel_buffer *buf)
{
	struct spdk_accel_task *task;
	struct iovec *iov;

	/* Now that we've allocated the actual data buffer for this accel_buffer, update all tasks
	 * in a sequence that were using it.
	 */
	TAILQ_FOREACH(task, &seq->tasks, seq_link) {
		if (task->src_domain == g_accel_domain && task->src_domain_ctx == buf) {
			iov = &task->aux_iovs[SPDK_ACCEL_AXU_IOV_VIRT_SRC];
			assert(task->s.iovcnt == 1);
			accel_update_virt_iov(iov, &task->s.iovs[0], buf);
			task->src_domain = NULL;
			task->s.iovs = iov;
		}
		if (task->dst_domain == g_accel_domain && task->dst_domain_ctx == buf) {
			iov = &task->aux_iovs[SPDK_ACCEL_AXU_IOV_VIRT_DST];
			assert(task->d.iovcnt == 1);
			accel_update_virt_iov(iov, &task->d.iovs[0], buf);
			task->dst_domain = NULL;
			task->d.iovs = iov;
		}
	}
}

static void accel_process_sequence(struct spdk_accel_sequence *seq);

static void
accel_iobuf_get_virtbuf_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);

	assert(accel_buf->seq != NULL);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
	accel_process_sequence(accel_buf->seq);
}

static bool
accel_sequence_alloc_buf(struct spdk_accel_sequence *seq, struct accel_buffer *buf,
			 spdk_iobuf_get_cb cb_fn)
{
	struct accel_io_channel *ch = seq->ch;

	assert(buf->buf == NULL);
	assert(buf->seq == NULL);

	buf->seq = seq;
	buf->buf = spdk_iobuf_get(&ch->iobuf, buf->len, &buf->iobuf, cb_fn);
	if (buf->buf == NULL) {
		accel_update_stats(ch, retry.iobuf, 1);
		return false;
	}

	return true;
}
static bool
accel_sequence_check_virtbuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	/* If a task doesn't have dst/src (e.g. fill, crc32), its dst/src domain should be set to
	 * NULL */
	if (task->src_domain == g_accel_domain) {
		if (!accel_sequence_alloc_buf(seq, task->src_domain_ctx,
					      accel_iobuf_get_virtbuf_cb)) {
			return false;
		}

		accel_sequence_set_virtbuf(seq, task->src_domain_ctx);
	}

	if (task->dst_domain == g_accel_domain) {
		if (!accel_sequence_alloc_buf(seq, task->dst_domain_ctx,
					      accel_iobuf_get_virtbuf_cb)) {
			return false;
		}

		accel_sequence_set_virtbuf(seq, task->dst_domain_ctx);
	}

	return true;
}

static void
accel_sequence_get_buf_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);

	assert(accel_buf->seq != NULL);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
	accel_buf->cb_fn(accel_buf->seq, accel_buf->cb_ctx);
}

bool
spdk_accel_alloc_sequence_buf(struct spdk_accel_sequence *seq, void *buf,
			      struct spdk_memory_domain *domain, void *domain_ctx,
			      spdk_accel_sequence_get_buf_cb cb_fn, void *cb_ctx)
{
	struct accel_buffer *accel_buf = domain_ctx;

	assert(domain == g_accel_domain);
	accel_buf->cb_fn = cb_fn;
	accel_buf->cb_ctx = cb_ctx;

	if (!accel_sequence_alloc_buf(seq, accel_buf, accel_sequence_get_buf_cb)) {
		return false;
	}

	accel_sequence_set_virtbuf(seq, accel_buf);

	return true;
}

struct spdk_accel_task *
spdk_accel_sequence_first_task(struct spdk_accel_sequence *seq)
{
	return TAILQ_FIRST(&seq->tasks);
}

struct spdk_accel_task *
spdk_accel_sequence_next_task(struct spdk_accel_task *task)
{
	return TAILQ_NEXT(task, seq_link);
}
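/*
 * Example (illustrative sketch): a driver's execute_sequence() callback can
 * walk the outstanding tasks with the two accessors above. The loop below is
 * a hypothetical skeleton, not a real driver.
 *
 *	struct spdk_accel_task *task;
 *
 *	for (task = spdk_accel_sequence_first_task(seq); task != NULL;
 *	     task = spdk_accel_sequence_next_task(task)) {
 *		switch (task->op_code) { ... }
 *	}
 */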
static inline void
accel_set_bounce_buffer(struct spdk_accel_bounce_buffer *bounce, struct iovec **iovs,
			uint32_t *iovcnt, struct spdk_memory_domain **domain, void **domain_ctx,
			struct accel_buffer *buf)
{
	bounce->orig_iovs = *iovs;
	bounce->orig_iovcnt = *iovcnt;
	bounce->orig_domain = *domain;
	bounce->orig_domain_ctx = *domain_ctx;
	bounce->iov.iov_base = buf->buf;
	bounce->iov.iov_len = buf->len;

	*iovs = &bounce->iov;
	*iovcnt = 1;
	*domain = NULL;
}

static void
accel_iobuf_get_src_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct spdk_accel_task *task;
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	task = TAILQ_FIRST(&accel_buf->seq->tasks);
	assert(task != NULL);

	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
	accel_set_bounce_buffer(&task->bounce.s, &task->s.iovs, &task->s.iovcnt, &task->src_domain,
				&task->src_domain_ctx, accel_buf);
	accel_process_sequence(accel_buf->seq);
}

static void
accel_iobuf_get_dst_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct spdk_accel_task *task;
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	task = TAILQ_FIRST(&accel_buf->seq->tasks);
	assert(task != NULL);

	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
	accel_set_bounce_buffer(&task->bounce.d, &task->d.iovs, &task->d.iovcnt, &task->dst_domain,
				&task->dst_domain_ctx, accel_buf);
	accel_process_sequence(accel_buf->seq);
}

static int
accel_sequence_check_bouncebuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	struct accel_buffer *buf;

	if (task->src_domain != NULL) {
		/* By the time we're here, accel buffers should have been allocated */
		assert(task->src_domain != g_accel_domain);

		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->s.iovs, task->s.iovcnt));
		if (buf == NULL) {
			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
			return -ENOMEM;
		}

		TAILQ_INSERT_TAIL(&seq->bounce_bufs, buf, link);
		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_src_bounce_cb)) {
			return -EAGAIN;
		}

		accel_set_bounce_buffer(&task->bounce.s, &task->s.iovs, &task->s.iovcnt,
					&task->src_domain, &task->src_domain_ctx, buf);
	}

	if (task->dst_domain != NULL) {
		/* By the time we're here, accel buffers should have been allocated */
		assert(task->dst_domain != g_accel_domain);

		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->d.iovs, task->d.iovcnt));
		if (buf == NULL) {
			/* The src buffer will be released when a sequence is completed */
			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
			return -ENOMEM;
		}

		TAILQ_INSERT_TAIL(&seq->bounce_bufs, buf, link);
		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_dst_bounce_cb)) {
			return -EAGAIN;
		}

		accel_set_bounce_buffer(&task->bounce.d, &task->d.iovs, &task->d.iovcnt,
					&task->dst_domain, &task->dst_domain_ctx, buf);
	}

	return 0;
}

static void
accel_task_pull_data_cb(void *ctx, int status)
{
	struct spdk_accel_sequence *seq = ctx;

	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
	if (spdk_likely(status == 0)) {
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
	} else {
		accel_sequence_set_fail(seq, status);
	}

	accel_process_sequence(seq);
}

static void
accel_task_pull_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	int rc;

	assert(task->bounce.s.orig_iovs != NULL);
	assert(task->bounce.s.orig_domain != NULL);
	assert(task->bounce.s.orig_domain != g_accel_domain);
	assert(!g_modules_opc[task->op_code].supports_memory_domains);

	rc = spdk_memory_domain_pull_data(task->bounce.s.orig_domain,
					  task->bounce.s.orig_domain_ctx,
					  task->bounce.s.orig_iovs, task->bounce.s.orig_iovcnt,
					  task->s.iovs, task->s.iovcnt,
					  accel_task_pull_data_cb, seq);
	if (spdk_unlikely(rc != 0)) {
		SPDK_ERRLOG("Failed to pull data from memory domain: %s, rc: %d\n",
			    spdk_memory_domain_get_dma_device_id(task->bounce.s.orig_domain), rc);
		accel_sequence_set_fail(seq, rc);
	}
}
static void
accel_task_push_data_cb(void *ctx, int status)
{
	struct spdk_accel_sequence *seq = ctx;

	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
	if (spdk_likely(status == 0)) {
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
	} else {
		accel_sequence_set_fail(seq, status);
	}

	accel_process_sequence(seq);
}

static void
accel_task_push_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	int rc;

	assert(task->bounce.d.orig_iovs != NULL);
	assert(task->bounce.d.orig_domain != NULL);
	assert(task->bounce.d.orig_domain != g_accel_domain);
	assert(!g_modules_opc[task->op_code].supports_memory_domains);

	rc = spdk_memory_domain_push_data(task->bounce.d.orig_domain,
					  task->bounce.d.orig_domain_ctx,
					  task->bounce.d.orig_iovs, task->bounce.d.orig_iovcnt,
					  task->d.iovs, task->d.iovcnt,
					  accel_task_push_data_cb, seq);
	if (spdk_unlikely(rc != 0)) {
		SPDK_ERRLOG("Failed to push data to memory domain: %s, rc: %d\n",
			    spdk_memory_domain_get_dma_device_id(task->bounce.d.orig_domain), rc);
		accel_sequence_set_fail(seq, rc);
	}
}
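/*
 * The state machine below drives one task at a time through the sequence. A
 * sketch of the happy path for a single task (assuming no accel driver is
 * registered and the module doesn't support memory domains):
 *
 *	INIT -> CHECK_VIRTBUF -> CHECK_BOUNCEBUF
 *	     -> [PULL_DATA -> AWAIT_PULL_DATA]   (only with a src bounce buffer)
 *	     -> EXEC_TASK -> AWAIT_TASK -> COMPLETE_TASK
 *	     -> [PUSH_DATA -> AWAIT_PUSH_DATA]   (only with a dst bounce buffer)
 *	     -> NEXT_TASK -> INIT (next task) or sequence completion
 *
 * The AWAIT_* states are left from the corresponding completion callbacks.
 */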
static void
accel_process_sequence(struct spdk_accel_sequence *seq)
{
	struct accel_io_channel *accel_ch = seq->ch;
	struct spdk_accel_task *task;
	enum accel_sequence_state state;
	int rc;

	/* Prevent recursive calls to this function */
	if (spdk_unlikely(seq->in_process_sequence)) {
		return;
	}
	seq->in_process_sequence = true;

	task = TAILQ_FIRST(&seq->tasks);
	do {
		state = seq->state;
		switch (state) {
		case ACCEL_SEQUENCE_STATE_INIT:
			if (g_accel_driver != NULL) {
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_EXEC);
				break;
			}
		/* Fall through */
		case ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
			if (!accel_sequence_check_virtbuf(seq, task)) {
				/* We couldn't allocate a buffer, wait until one is available */
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
		/* Fall through */
		case ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF:
			/* If a module supports memory domains, we don't need to allocate bounce
			 * buffers */
			if (g_modules_opc[task->op_code].supports_memory_domains) {
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
			rc = accel_sequence_check_bouncebuf(seq, task);
			if (spdk_unlikely(rc != 0)) {
				/* We couldn't allocate a buffer, wait until one is available */
				if (rc == -EAGAIN) {
					break;
				}
				accel_sequence_set_fail(seq, rc);
				break;
			}
			if (task->s.iovs == &task->bounce.s.iov) {
				assert(task->bounce.s.orig_iovs);
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PULL_DATA);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
		/* Fall through */
		case ACCEL_SEQUENCE_STATE_EXEC_TASK:
			SPDK_DEBUGLOG(accel, "Executing %s operation, sequence: %p\n",
				      g_opcode_strings[task->op_code], seq);

			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_TASK);
			rc = accel_submit_task(accel_ch, task);
			if (spdk_unlikely(rc != 0)) {
				SPDK_ERRLOG("Failed to submit %s operation, sequence: %p\n",
					    g_opcode_strings[task->op_code], seq);
				accel_sequence_set_fail(seq, rc);
			}
			break;
		case ACCEL_SEQUENCE_STATE_PULL_DATA:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
			accel_task_pull_data(seq, task);
			break;
		case ACCEL_SEQUENCE_STATE_COMPLETE_TASK:
			if (task->d.iovs == &task->bounce.d.iov) {
				assert(task->bounce.d.orig_iovs);
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PUSH_DATA);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
			break;
		case ACCEL_SEQUENCE_STATE_PUSH_DATA:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
			accel_task_push_data(seq, task);
			break;
		case ACCEL_SEQUENCE_STATE_NEXT_TASK:
			TAILQ_REMOVE(&seq->tasks, task, seq_link);
			TAILQ_INSERT_TAIL(&seq->completed, task, seq_link);
			/* Check if there are any remaining tasks */
			task = TAILQ_FIRST(&seq->tasks);
			if (task == NULL) {
				/* Immediately return here to make sure we don't touch the sequence
				 * after it's completed */
				accel_sequence_complete(seq);
				return;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_INIT);
			break;
		case ACCEL_SEQUENCE_STATE_DRIVER_EXEC:
			assert(!TAILQ_EMPTY(&seq->tasks));

			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK);
			rc = g_accel_driver->execute_sequence(accel_ch->driver_channel, seq);
			if (spdk_unlikely(rc != 0)) {
				SPDK_ERRLOG("Failed to execute sequence: %p using driver: %s\n",
					    seq, g_accel_driver->name);
				accel_sequence_set_fail(seq, rc);
			}
			break;
		case ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE:
			/* Get the task again, as the driver might have completed some tasks
			 * synchronously */
			task = TAILQ_FIRST(&seq->tasks);
			if (task == NULL) {
				/* Immediately return here to make sure we don't touch the sequence
				 * after it's completed */
				accel_sequence_complete(seq);
				return;
			}
			/* We don't want to execute the next task through the driver, so we
			 * explicitly omit the INIT state here */
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
			break;
		case ACCEL_SEQUENCE_STATE_ERROR:
			/* Immediately return here to make sure we don't touch the sequence
			 * after it's completed */
			assert(seq->status != 0);
			accel_sequence_complete(seq);
			return;
		case ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF:
		case ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF:
		case ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA:
		case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
		case ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA:
		case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK:
			break;
		default:
			assert(0 && "bad state");
			break;
		}
	} while (seq->state != state);

	seq->in_process_sequence = false;
}
static void
accel_sequence_task_cb(void *cb_arg, int status)
{
	struct spdk_accel_sequence *seq = cb_arg;
	struct spdk_accel_task *task = TAILQ_FIRST(&seq->tasks);
	struct accel_io_channel *accel_ch = seq->ch;

	/* spdk_accel_task_complete() puts the task back to the task pool, but we don't want to do
	 * that if a task is part of a sequence. Removing the task from that pool here is the
	 * easiest way to prevent this, even though it is a bit hacky.
	 */
	assert(task != NULL);
	TAILQ_REMOVE(&accel_ch->task_pool, task, link);

	switch (seq->state) {
	case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_COMPLETE_TASK);
		if (spdk_unlikely(status != 0)) {
			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p\n",
				    g_opcode_strings[task->op_code], seq);
			accel_sequence_set_fail(seq, status);
		}

		accel_process_sequence(seq);
		break;
	case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK:
		assert(g_accel_driver != NULL);
		/* Immediately remove the task from the outstanding list to make sure the next call
		 * to spdk_accel_sequence_first_task() doesn't return it */
		TAILQ_REMOVE(&seq->tasks, task, seq_link);
		TAILQ_INSERT_TAIL(&seq->completed, task, seq_link);

		if (spdk_unlikely(status != 0)) {
			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p through "
				    "driver: %s\n", g_opcode_strings[task->op_code], seq,
				    g_accel_driver->name);
			/* Update status without using accel_sequence_set_fail() to avoid changing
			 * seq's state to ERROR until driver calls spdk_accel_sequence_continue() */
			seq->status = status;
		}
		break;
	default:
		assert(0 && "bad state");
		break;
	}
}

void
spdk_accel_sequence_continue(struct spdk_accel_sequence *seq)
{
	assert(g_accel_driver != NULL);
	assert(seq->state == ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK);

	if (spdk_likely(seq->status == 0)) {
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE);
	} else {
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
	}

	accel_process_sequence(seq);
}

static bool
accel_compare_iovs(struct iovec *iova, uint32_t iovacnt, struct iovec *iovb, uint32_t iovbcnt)
{
	/* For now, just do a dumb check that the iovecs arrays are exactly the same */
	if (iovacnt != iovbcnt) {
		return false;
	}

	return memcmp(iova, iovb, sizeof(*iova) * iovacnt) == 0;
}
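/*
 * The helpers below implement a simple copy-elision pass that runs before a
 * sequence executes. An illustrative sketch of the effect (buffers are
 * hypothetical): appending
 *
 *	fill(bounce, 0xa5) -> copy(bounce -> dst)
 *
 * is rewritten so that the fill targets dst directly and the copy task is
 * completed without ever being submitted, as long as the copy's source
 * matches the previous task's destination (same iovecs and memory domain).
 */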
static bool
accel_compare_iovs(struct iovec *iova, uint32_t iovacnt, struct iovec *iovb, uint32_t iovbcnt)
{
	/* For now, just do a dumb check that the iovecs arrays are exactly the same */
	if (iovacnt != iovbcnt) {
		return false;
	}

	return memcmp(iova, iovb, sizeof(*iova) * iovacnt) == 0;
}

static bool
accel_task_set_dstbuf(struct spdk_accel_task *task, struct spdk_accel_task *next)
{
	struct spdk_accel_task *prev;

	switch (task->op_code) {
	case SPDK_ACCEL_OPC_DECOMPRESS:
	case SPDK_ACCEL_OPC_FILL:
	case SPDK_ACCEL_OPC_ENCRYPT:
	case SPDK_ACCEL_OPC_DECRYPT:
		if (task->dst_domain != next->src_domain) {
			return false;
		}
		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
					next->s.iovs, next->s.iovcnt)) {
			return false;
		}
		task->d.iovs = next->d.iovs;
		task->d.iovcnt = next->d.iovcnt;
		task->dst_domain = next->dst_domain;
		task->dst_domain_ctx = next->dst_domain_ctx;
		break;
	case SPDK_ACCEL_OPC_CRC32C:
		/* crc32 is special, because it doesn't have a dst buffer */
		if (task->src_domain != next->src_domain) {
			return false;
		}
		if (!accel_compare_iovs(task->s.iovs, task->s.iovcnt,
					next->s.iovs, next->s.iovcnt)) {
			return false;
		}
		/* We can only change crc32's buffer if we can change previous task's buffer */
		prev = TAILQ_PREV(task, accel_sequence_tasks, seq_link);
		if (prev == NULL) {
			return false;
		}
		if (!accel_task_set_dstbuf(prev, next)) {
			return false;
		}
		task->s.iovs = next->d.iovs;
		task->s.iovcnt = next->d.iovcnt;
		task->src_domain = next->dst_domain;
		task->src_domain_ctx = next->dst_domain_ctx;
		break;
	default:
		return false;
	}

	return true;
}

static void
accel_sequence_merge_tasks(struct spdk_accel_sequence *seq, struct spdk_accel_task *task,
			   struct spdk_accel_task **next_task)
{
	struct spdk_accel_task *next = *next_task;

	switch (task->op_code) {
	case SPDK_ACCEL_OPC_COPY:
		/* We only allow changing src of operations that actually have a src, e.g. we never
		 * do it for fill. Theoretically, it is possible, but we'd have to be careful to
		 * change the src of the operation after fill (which in turn could also be a fill).
		 * So, for the sake of simplicity, skip this type of operations for now.
		 */
		if (next->op_code != SPDK_ACCEL_OPC_DECOMPRESS &&
		    next->op_code != SPDK_ACCEL_OPC_COPY &&
		    next->op_code != SPDK_ACCEL_OPC_ENCRYPT &&
		    next->op_code != SPDK_ACCEL_OPC_DECRYPT &&
		    next->op_code != SPDK_ACCEL_OPC_CRC32C) {
			break;
		}
		if (task->dst_domain != next->src_domain) {
			break;
		}
		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
					next->s.iovs, next->s.iovcnt)) {
			break;
		}
		next->s.iovs = task->s.iovs;
		next->s.iovcnt = task->s.iovcnt;
		next->src_domain = task->src_domain;
		next->src_domain_ctx = task->src_domain_ctx;
		TAILQ_REMOVE(&seq->tasks, task, seq_link);
		TAILQ_INSERT_TAIL(&seq->completed, task, seq_link);
		break;
	case SPDK_ACCEL_OPC_DECOMPRESS:
	case SPDK_ACCEL_OPC_FILL:
	case SPDK_ACCEL_OPC_ENCRYPT:
	case SPDK_ACCEL_OPC_DECRYPT:
	case SPDK_ACCEL_OPC_CRC32C:
		/* We can only merge tasks when one of them is a copy */
		if (next->op_code != SPDK_ACCEL_OPC_COPY) {
			break;
		}
		if (!accel_task_set_dstbuf(task, next)) {
			break;
		}
		/* We're removing next_task from the tasks queue, so we need to update its pointer,
		 * so that the TAILQ_FOREACH_SAFE() loop below works correctly */
		*next_task = TAILQ_NEXT(next, seq_link);
		TAILQ_REMOVE(&seq->tasks, next, seq_link);
		TAILQ_INSERT_TAIL(&seq->completed, next, seq_link);
		break;
	default:
		assert(0 && "bad opcode");
		break;
	}
}
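
/*
 * Example of the merging above (illustrative): the two-task sequence
 *
 *	copy(dst=B, src=A) -> decompress(dst=C, src=B)
 *
 * collapses into a single decompress(dst=C, src=A): because the copy's
 * destination exactly matches the decompress' source (same iovecs, same
 * memory domain), the copy can be elided by rewriting the source buffer of
 * the following operation.
 */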
void
spdk_accel_sequence_finish(struct spdk_accel_sequence *seq,
			   spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct spdk_accel_task *task, *next;

	/* Try to remove any copy operations if possible */
	TAILQ_FOREACH_SAFE(task, &seq->tasks, seq_link, next) {
		if (next == NULL) {
			break;
		}
		accel_sequence_merge_tasks(seq, task, &next);
	}

	seq->cb_fn = cb_fn;
	seq->cb_arg = cb_arg;

	accel_process_sequence(seq);
}

void
spdk_accel_sequence_reverse(struct spdk_accel_sequence *seq)
{
	struct accel_sequence_tasks tasks = TAILQ_HEAD_INITIALIZER(tasks);
	struct spdk_accel_task *task;

	assert(TAILQ_EMPTY(&seq->completed));
	TAILQ_SWAP(&tasks, &seq->tasks, spdk_accel_task, seq_link);

	while (!TAILQ_EMPTY(&tasks)) {
		task = TAILQ_FIRST(&tasks);
		TAILQ_REMOVE(&tasks, task, seq_link);
		TAILQ_INSERT_HEAD(&seq->tasks, task, seq_link);
	}
}

void
spdk_accel_sequence_abort(struct spdk_accel_sequence *seq)
{
	if (seq == NULL) {
		return;
	}

	accel_sequence_complete_tasks(seq);
	accel_sequence_put(seq);
}

struct spdk_memory_domain *
spdk_accel_get_memory_domain(void)
{
	return g_accel_domain;
}

static struct spdk_accel_module_if *
_module_find_by_name(const char *name)
{
	struct spdk_accel_module_if *accel_module = NULL;

	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		if (strcmp(name, accel_module->name) == 0) {
			break;
		}
	}

	return accel_module;
}

static inline struct spdk_accel_crypto_key *
_accel_crypto_key_get(const char *name)
{
	struct spdk_accel_crypto_key *key;

	assert(spdk_spin_held(&g_keyring_spin));

	TAILQ_FOREACH(key, &g_keyring, link) {
		if (strcmp(name, key->param.key_name) == 0) {
			return key;
		}
	}

	return NULL;
}

static void
accel_crypto_key_free_mem(struct spdk_accel_crypto_key *key)
{
	if (key->param.hex_key) {
		spdk_memset_s(key->param.hex_key, key->key_size * 2, 0, key->key_size * 2);
		free(key->param.hex_key);
	}
	if (key->param.hex_key2) {
		spdk_memset_s(key->param.hex_key2, key->key2_size * 2, 0, key->key2_size * 2);
		free(key->param.hex_key2);
	}
	free(key->param.tweak_mode);
	free(key->param.key_name);
	free(key->param.cipher);
	if (key->key) {
		spdk_memset_s(key->key, key->key_size, 0, key->key_size);
		free(key->key);
	}
	if (key->key2) {
		spdk_memset_s(key->key2, key->key2_size, 0, key->key2_size);
		free(key->key2);
	}
	free(key);
}

static void
accel_crypto_key_destroy_unsafe(struct spdk_accel_crypto_key *key)
{
	assert(key->module_if);
	assert(key->module_if->crypto_key_deinit);

	key->module_if->crypto_key_deinit(key);
	accel_crypto_key_free_mem(key);
}
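
/*
 * Note on the constant-time construction below: the loop condition uses the
 * bitwise '&' instead of the short-circuiting '&&', and differences are
 * accumulated into 'x' with '|=' rather than returning early, so the running
 * time depends only on the (non-secret) key lengths, never on where the keys
 * first differ.
 */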
/*
 * This function mitigates a timing side channel which could be caused by using strcmp()
 * Please refer to chapter "Mitigating Information Leakage Based on Variable Timing" in
 * the article [1] for more details
 * [1] https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/secure-coding/mitigate-timing-side-channel-crypto-implementation.html
 */
static bool
accel_aes_xts_keys_equal(const char *k1, size_t k1_len, const char *k2, size_t k2_len)
{
	size_t i;
	volatile size_t x = k1_len ^ k2_len;

	for (i = 0; ((i < k1_len) & (i < k2_len)); i++) {
		x |= k1[i] ^ k2[i];
	}

	return x == 0;
}

static const char *g_tweak_modes[] = {
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA] = "SIMPLE_LBA",
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_JOIN_NEG_LBA_WITH_LBA] = "JOIN_NEG_LBA_WITH_LBA",
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_INCR_512_FULL_LBA] = "INCR_512_FULL_LBA",
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_INCR_512_UPPER_LBA] = "INCR_512_UPPER_LBA",
};

static const char *g_ciphers[] = {
	[SPDK_ACCEL_CIPHER_AES_CBC] = "AES_CBC",
	[SPDK_ACCEL_CIPHER_AES_XTS] = "AES_XTS",
};

int
spdk_accel_crypto_key_create(const struct spdk_accel_crypto_key_create_param *param)
{
	struct spdk_accel_module_if *module;
	struct spdk_accel_crypto_key *key;
	size_t hex_key_size, hex_key2_size;
	bool found = false;
	size_t i;
	int rc;

	if (!param || !param->hex_key || !param->cipher || !param->key_name) {
		return -EINVAL;
	}

	if (g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module != g_modules_opc[SPDK_ACCEL_OPC_DECRYPT].module) {
		/* hardly ever possible, but let's check and warn the user */
		SPDK_ERRLOG("Different accel modules are used for encryption and decryption\n");
	}

	module = g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module;

	if (!module) {
		SPDK_ERRLOG("No accel module found assigned for crypto operation\n");
		return -ENOENT;
	}

	if (!module->crypto_key_init || !module->crypto_supports_cipher) {
		SPDK_ERRLOG("Module %s doesn't support crypto operations\n", module->name);
		return -ENOTSUP;
	}

	key = calloc(1, sizeof(*key));
	if (!key) {
		return -ENOMEM;
	}

	key->param.key_name = strdup(param->key_name);
	if (!key->param.key_name) {
		rc = -ENOMEM;
		goto error;
	}

	for (i = 0; i < SPDK_COUNTOF(g_ciphers); ++i) {
		assert(g_ciphers[i]);

		if (strncmp(param->cipher, g_ciphers[i], strlen(g_ciphers[i])) == 0) {
			key->cipher = i;
			found = true;
			break;
		}
	}

	if (!found) {
		SPDK_ERRLOG("Failed to parse cipher\n");
		rc = -EINVAL;
		goto error;
	}

	key->param.cipher = strdup(param->cipher);
	if (!key->param.cipher) {
		rc = -ENOMEM;
		goto error;
	}

	hex_key_size = strnlen(param->hex_key, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
	if (hex_key_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
		SPDK_ERRLOG("key1 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
		rc = -EINVAL;
		goto error;
	}

	if (hex_key_size == 0) {
		SPDK_ERRLOG("key1 size cannot be 0\n");
		rc = -EINVAL;
		goto error;
	}

	key->param.hex_key = strdup(param->hex_key);
	if (!key->param.hex_key) {
		rc = -ENOMEM;
		goto error;
	}

	key->key_size = hex_key_size / 2;
	key->key = spdk_unhexlify(key->param.hex_key);
	if (!key->key) {
		SPDK_ERRLOG("Failed to unhexlify key1\n");
		rc = -EINVAL;
		goto error;
	}

	if (param->hex_key2) {
		hex_key2_size = strnlen(param->hex_key2, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
		if (hex_key2_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
			SPDK_ERRLOG("key2 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
			rc = -EINVAL;
			goto error;
		}

		if (hex_key2_size == 0) {
			SPDK_ERRLOG("key2 size cannot be 0\n");
			rc = -EINVAL;
			goto error;
		}

		key->param.hex_key2 = strdup(param->hex_key2);
		if (!key->param.hex_key2) {
			rc = -ENOMEM;
			goto error;
		}

		key->key2_size = hex_key2_size / 2;
		key->key2 = spdk_unhexlify(key->param.hex_key2);
		if (!key->key2) {
			SPDK_ERRLOG("Failed to unhexlify key2\n");
			rc = -EINVAL;
			goto error;
		}
	}

	key->tweak_mode = ACCEL_CRYPTO_TWEAK_MODE_DEFAULT;
	if (param->tweak_mode) {
		found = false;

		key->param.tweak_mode = strdup(param->tweak_mode);
		if (!key->param.tweak_mode) {
			rc = -ENOMEM;
			goto error;
		}

		for (i = 0; i < SPDK_COUNTOF(g_tweak_modes); ++i) {
			assert(g_tweak_modes[i]);

			if (strncmp(param->tweak_mode, g_tweak_modes[i], strlen(g_tweak_modes[i])) == 0) {
				key->tweak_mode = i;
				found = true;
				break;
			}
		}

		if (!found) {
			SPDK_ERRLOG("Failed to parse tweak mode\n");
			rc = -EINVAL;
			goto error;
		}
	}

	if ((!module->crypto_supports_tweak_mode && key->tweak_mode != ACCEL_CRYPTO_TWEAK_MODE_DEFAULT) ||
	    (module->crypto_supports_tweak_mode && !module->crypto_supports_tweak_mode(key->tweak_mode))) {
		SPDK_ERRLOG("Module %s doesn't support %s tweak mode\n", module->name,
			    g_tweak_modes[key->tweak_mode]);
		rc = -EINVAL;
		goto error;
	}

	if (!module->crypto_supports_cipher(key->cipher, key->key_size)) {
		SPDK_ERRLOG("Module %s doesn't support %s cipher with %zu key size\n", module->name,
			    g_ciphers[key->cipher], key->key_size);
		rc = -EINVAL;
		goto error;
	}

	if (key->cipher == SPDK_ACCEL_CIPHER_AES_XTS) {
		if (!key->key2) {
			SPDK_ERRLOG("%s key2 is missing\n", g_ciphers[key->cipher]);
			rc = -EINVAL;
			goto error;
		}

		if (key->key_size != key->key2_size) {
			SPDK_ERRLOG("%s key size %zu is not equal to key2 size %zu\n", g_ciphers[key->cipher],
				    key->key_size, key->key2_size);
			rc = -EINVAL;
			goto error;
		}

		if (accel_aes_xts_keys_equal(key->key, key->key_size, key->key2, key->key2_size)) {
			SPDK_ERRLOG("%s identical keys are not secure\n", g_ciphers[key->cipher]);
			rc = -EINVAL;
			goto error;
		}
	}

	if (key->cipher == SPDK_ACCEL_CIPHER_AES_CBC) {
		if (key->key2_size) {
			SPDK_ERRLOG("%s doesn't use key2\n", g_ciphers[key->cipher]);
			rc = -EINVAL;
			goto error;
		}
	}

	key->module_if = module;

	spdk_spin_lock(&g_keyring_spin);
	if (_accel_crypto_key_get(param->key_name)) {
		rc = -EEXIST;
	} else {
		rc = module->crypto_key_init(key);
		if (rc) {
			SPDK_ERRLOG("Module %s failed to initialize crypto key\n", module->name);
		} else {
			TAILQ_INSERT_TAIL(&g_keyring, key, link);
		}
	}
	spdk_spin_unlock(&g_keyring_spin);

	if (rc) {
		goto error;
	}

	return 0;

error:
	accel_crypto_key_free_mem(key);
	return rc;
}
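
/*
 * Illustrative call (key material here is made up for the example; in
 * practice this is driven by the accel_crypto_key_create RPC):
 *
 *	struct spdk_accel_crypto_key_create_param param = {
 *		.cipher = "AES_XTS",
 *		.hex_key = "00112233445566778899aabbccddeeff",	// hypothetical
 *		.hex_key2 = "ffeeddccbbaa99887766554433221100",	// hypothetical
 *		.tweak_mode = "SIMPLE_LBA",			// optional
 *		.key_name = "key0",
 *	};
 *
 *	rc = spdk_accel_crypto_key_create(&param);
 */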
int
spdk_accel_crypto_key_destroy(struct spdk_accel_crypto_key *key)
{
	if (!key || !key->module_if) {
		return -EINVAL;
	}

	spdk_spin_lock(&g_keyring_spin);
	if (!_accel_crypto_key_get(key->param.key_name)) {
		spdk_spin_unlock(&g_keyring_spin);
		return -ENOENT;
	}
	TAILQ_REMOVE(&g_keyring, key, link);
	spdk_spin_unlock(&g_keyring_spin);

	accel_crypto_key_destroy_unsafe(key);

	return 0;
}

struct spdk_accel_crypto_key *
spdk_accel_crypto_key_get(const char *name)
{
	struct spdk_accel_crypto_key *key;

	spdk_spin_lock(&g_keyring_spin);
	key = _accel_crypto_key_get(name);
	spdk_spin_unlock(&g_keyring_spin);

	return key;
}

/* Helper function when accel modules register with the framework. */
void
spdk_accel_module_list_add(struct spdk_accel_module_if *accel_module)
{
	struct spdk_accel_module_if *tmp;

	if (_module_find_by_name(accel_module->name)) {
		SPDK_NOTICELOG("Module %s already registered\n", accel_module->name);
		assert(false);
		return;
	}

	/* Keep the list sorted by ascending priority, inserting before the first
	 * entry with a higher priority. */
	TAILQ_FOREACH(tmp, &spdk_accel_module_list, tailq) {
		if (accel_module->priority < tmp->priority) {
			break;
		}
	}

	if (tmp != NULL) {
		TAILQ_INSERT_BEFORE(tmp, accel_module, tailq);
	} else {
		TAILQ_INSERT_TAIL(&spdk_accel_module_list, accel_module, tailq);
	}
}
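
/*
 * Modules typically don't call spdk_accel_module_list_add() directly; they
 * register at load time, e.g. via the SPDK_ACCEL_MODULE_REGISTER() helper
 * from spdk/accel_module.h (sketch with hypothetical callbacks):
 *
 *	static struct spdk_accel_module_if g_example_module = {
 *		.module_init	= example_init,		// hypothetical
 *		.name		= "example",
 *		...
 *	};
 *	SPDK_ACCEL_MODULE_REGISTER(example, &g_example_module)
 */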
/* Framework level channel create callback. */
static int
accel_create_channel(void *io_device, void *ctx_buf)
{
	struct accel_io_channel *accel_ch = ctx_buf;
	struct spdk_accel_task *accel_task;
	struct spdk_accel_sequence *seq;
	struct accel_buffer *buf;
	uint8_t *task_mem;
	uint32_t i = 0, j;
	int rc;

	accel_ch->task_pool_base = calloc(g_opts.task_count, g_max_accel_module_size);
	if (accel_ch->task_pool_base == NULL) {
		return -ENOMEM;
	}

	accel_ch->seq_pool_base = calloc(g_opts.sequence_count, sizeof(struct spdk_accel_sequence));
	if (accel_ch->seq_pool_base == NULL) {
		goto err;
	}

	accel_ch->buf_pool_base = calloc(g_opts.buf_count, sizeof(struct accel_buffer));
	if (accel_ch->buf_pool_base == NULL) {
		goto err;
	}

	TAILQ_INIT(&accel_ch->task_pool);
	TAILQ_INIT(&accel_ch->seq_pool);
	TAILQ_INIT(&accel_ch->buf_pool);

	task_mem = accel_ch->task_pool_base;
	for (i = 0; i < g_opts.task_count; i++) {
		accel_task = (struct spdk_accel_task *)task_mem;
		TAILQ_INSERT_TAIL(&accel_ch->task_pool, accel_task, link);
		task_mem += g_max_accel_module_size;
	}
	for (i = 0; i < g_opts.sequence_count; i++) {
		seq = &accel_ch->seq_pool_base[i];
		TAILQ_INSERT_TAIL(&accel_ch->seq_pool, seq, link);
	}
	for (i = 0; i < g_opts.buf_count; i++) {
		buf = &accel_ch->buf_pool_base[i];
		TAILQ_INSERT_TAIL(&accel_ch->buf_pool, buf, link);
	}

	/* Assign modules and get IO channels for each */
	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
		accel_ch->module_ch[i] = g_modules_opc[i].module->get_io_channel();
		/* This can happen if idxd runs out of channels. */
		if (accel_ch->module_ch[i] == NULL) {
			SPDK_ERRLOG("Module %s failed to get io channel\n", g_modules_opc[i].module->name);
			goto err;
		}
	}

	if (g_accel_driver != NULL) {
		accel_ch->driver_channel = g_accel_driver->get_io_channel();
		if (accel_ch->driver_channel == NULL) {
			SPDK_ERRLOG("Failed to get driver's IO channel\n");
			goto err;
		}
	}

	rc = spdk_iobuf_channel_init(&accel_ch->iobuf, "accel", g_opts.small_cache_size,
				     g_opts.large_cache_size);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to initialize iobuf accel channel\n");
		goto err;
	}

	return 0;
err:
	if (accel_ch->driver_channel != NULL) {
		spdk_put_io_channel(accel_ch->driver_channel);
	}
	for (j = 0; j < i; j++) {
		spdk_put_io_channel(accel_ch->module_ch[j]);
	}
	free(accel_ch->task_pool_base);
	free(accel_ch->seq_pool_base);
	free(accel_ch->buf_pool_base);

	return -ENOMEM;
}
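
/*
 * All of the per-channel pools above are sized by g_opts, so every thread
 * that obtains an accel channel pays the full footprint. Minimal usage
 * sketch:
 *
 *	struct spdk_io_channel *ch = spdk_accel_get_io_channel();
 *
 *	... submit accel operations / build sequences on this thread ...
 *
 *	spdk_put_io_channel(ch);
 */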
static void
accel_add_stats(struct accel_stats *total, struct accel_stats *stats)
{
	int i;

	total->sequence_executed += stats->sequence_executed;
	total->sequence_failed += stats->sequence_failed;
	total->retry.task += stats->retry.task;
	total->retry.sequence += stats->retry.sequence;
	total->retry.iobuf += stats->retry.iobuf;
	total->retry.bufdesc += stats->retry.bufdesc;
	for (i = 0; i < SPDK_ACCEL_OPC_LAST; ++i) {
		total->operations[i].executed += stats->operations[i].executed;
		total->operations[i].failed += stats->operations[i].failed;
		total->operations[i].num_bytes += stats->operations[i].num_bytes;
	}
}

/* Framework level channel destroy callback. */
static void
accel_destroy_channel(void *io_device, void *ctx_buf)
{
	struct accel_io_channel *accel_ch = ctx_buf;
	int i;

	spdk_iobuf_channel_fini(&accel_ch->iobuf);

	if (accel_ch->driver_channel != NULL) {
		spdk_put_io_channel(accel_ch->driver_channel);
	}

	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
		assert(accel_ch->module_ch[i] != NULL);
		spdk_put_io_channel(accel_ch->module_ch[i]);
		accel_ch->module_ch[i] = NULL;
	}

	/* Update global stats to make sure channel's stats aren't lost after a channel is gone */
	spdk_spin_lock(&g_stats_lock);
	accel_add_stats(&g_stats, &accel_ch->stats);
	spdk_spin_unlock(&g_stats_lock);

	free(accel_ch->task_pool_base);
	free(accel_ch->seq_pool_base);
	free(accel_ch->buf_pool_base);
}

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(&spdk_accel_module_list);
}

static int
accel_module_initialize(void)
{
	struct spdk_accel_module_if *accel_module, *tmp_module;
	int rc = 0, module_rc;

	TAILQ_FOREACH_SAFE(accel_module, &spdk_accel_module_list, tailq, tmp_module) {
		module_rc = accel_module->module_init();
		if (module_rc) {
			SPDK_ERRLOG("Module %s initialization failed with %d\n", accel_module->name, module_rc);
			TAILQ_REMOVE(&spdk_accel_module_list, accel_module, tailq);
			if (!rc) {
				rc = module_rc;
			}
			/* Don't fall through to the success log below for a module that
			 * failed to initialize and has been removed from the list. */
			continue;
		}

		SPDK_DEBUGLOG(accel, "Module %s initialized.\n", accel_module->name);
	}

	return rc;
}

static void
accel_module_init_opcode(enum spdk_accel_opcode opcode)
{
	struct accel_module *module = &g_modules_opc[opcode];
	struct spdk_accel_module_if *module_if = module->module;

	if (module_if->get_memory_domains != NULL) {
		module->supports_memory_domains = module_if->get_memory_domains(NULL, 0) > 0;
	}
}
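
/*
 * Opcode-to-module assignment happens in two passes in spdk_accel_initialize()
 * below: first, every module that supports an opcode claims it (modules later
 * in the list, i.e. higher priority, win), then any user overrides recorded in
 * g_modules_opc_override[] (e.g. set via the accel_assign_opc RPC) are applied
 * on top.
 */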
int
spdk_accel_initialize(void)
{
	enum spdk_accel_opcode op;
	struct spdk_accel_module_if *accel_module = NULL;
	int rc;

	/*
	 * We need a unique identifier for the accel framework, so use the
	 * spdk_accel_module_list address for this purpose.
	 */
	spdk_io_device_register(&spdk_accel_module_list, accel_create_channel, accel_destroy_channel,
				sizeof(struct accel_io_channel), "accel");

	spdk_spin_init(&g_keyring_spin);
	spdk_spin_init(&g_stats_lock);

	rc = spdk_memory_domain_create(&g_accel_domain, SPDK_DMA_DEVICE_TYPE_ACCEL, NULL,
				       "SPDK_ACCEL_DMA_DEVICE");
	if (rc != 0) {
		SPDK_ERRLOG("Failed to create accel memory domain\n");
		return rc;
	}

	g_modules_started = true;
	rc = accel_module_initialize();
	if (rc) {
		return rc;
	}

	/* The module list is ordered by priority, with the highest priority modules being at the
	 * end of the list. The software module should be somewhere at the beginning of the list,
	 * before all HW modules.
	 * NOTE: all opcodes must be supported by software in the event that no HW modules are
	 * initialized to support the operation.
	 */
	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
			if (accel_module->supports_opcode(op)) {
				g_modules_opc[op].module = accel_module;
				SPDK_DEBUGLOG(accel, "OPC 0x%x now assigned to %s\n", op, accel_module->name);
			}
		}

		if (accel_module->get_ctx_size != NULL) {
			g_max_accel_module_size = spdk_max(g_max_accel_module_size,
							   accel_module->get_ctx_size());
		}
	}

	/* Now let's check for overrides and apply all that exist */
	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
		if (g_modules_opc_override[op] != NULL) {
			accel_module = _module_find_by_name(g_modules_opc_override[op]);
			if (accel_module == NULL) {
				SPDK_ERRLOG("Invalid module name of %s\n", g_modules_opc_override[op]);
				return -EINVAL;
			}
			if (accel_module->supports_opcode(op) == false) {
				SPDK_ERRLOG("Module %s does not support op code %d\n", accel_module->name, op);
				return -EINVAL;
			}
			g_modules_opc[op].module = accel_module;
		}
	}

	if (g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module != g_modules_opc[SPDK_ACCEL_OPC_DECRYPT].module) {
		SPDK_ERRLOG("Different accel modules are assigned to encrypt and decrypt operations\n");
		return -EINVAL;
	}

	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
		assert(g_modules_opc[op].module != NULL);
		accel_module_init_opcode(op);
	}

	rc = spdk_iobuf_register_module("accel");
	if (rc != 0) {
		SPDK_ERRLOG("Failed to register accel iobuf module\n");
		return rc;
	}

	return 0;
}

static void
accel_module_finish_cb(void)
{
	spdk_accel_fini_cb cb_fn = g_fini_cb_fn;

	cb_fn(g_fini_cb_arg);
	g_fini_cb_fn = NULL;
	g_fini_cb_arg = NULL;
}

static void
accel_write_overridden_opc(struct spdk_json_write_ctx *w, const char *opc_str,
			   const char *module_str)
{
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "accel_assign_opc");
	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_string(w, "opname", opc_str);
	spdk_json_write_named_string(w, "module", module_str);
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);
}

static void
__accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
{
	spdk_json_write_named_string(w, "name", key->param.key_name);
	spdk_json_write_named_string(w, "cipher", key->param.cipher);
	spdk_json_write_named_string(w, "key", key->param.hex_key);
	if (key->param.hex_key2) {
		spdk_json_write_named_string(w, "key2", key->param.hex_key2);
	}

	if (key->param.tweak_mode) {
		spdk_json_write_named_string(w, "tweak_mode", key->param.tweak_mode);
	}
}

void
_accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
{
	spdk_json_write_object_begin(w);
	__accel_crypto_key_dump_param(w, key);
	spdk_json_write_object_end(w);
}

static void
_accel_crypto_key_write_config_json(struct spdk_json_write_ctx *w,
				    struct spdk_accel_crypto_key *key)
{
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "accel_crypto_key_create");
	spdk_json_write_named_object_begin(w, "params");
	__accel_crypto_key_dump_param(w, key);
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);
}
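
/*
 * accel_write_options() below emits one entry of the JSON-RPC config array;
 * the output has roughly this shape (values here are illustrative):
 *
 *	{
 *	  "method": "accel_set_options",
 *	  "params": {
 *	    "small_cache_size": 128,
 *	    "large_cache_size": 16,
 *	    "task_count": 2048,
 *	    "sequence_count": 2048,
 *	    "buf_count": 2048
 *	  }
 *	}
 */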
static void
accel_write_options(struct spdk_json_write_ctx *w)
{
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "accel_set_options");
	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_uint32(w, "small_cache_size", g_opts.small_cache_size);
	spdk_json_write_named_uint32(w, "large_cache_size", g_opts.large_cache_size);
	spdk_json_write_named_uint32(w, "task_count", g_opts.task_count);
	spdk_json_write_named_uint32(w, "sequence_count", g_opts.sequence_count);
	spdk_json_write_named_uint32(w, "buf_count", g_opts.buf_count);
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);
}

static void
_accel_crypto_keys_write_config_json(struct spdk_json_write_ctx *w, bool full_dump)
{
	struct spdk_accel_crypto_key *key;

	spdk_spin_lock(&g_keyring_spin);
	TAILQ_FOREACH(key, &g_keyring, link) {
		if (full_dump) {
			_accel_crypto_key_write_config_json(w, key);
		} else {
			_accel_crypto_key_dump_param(w, key);
		}
	}
	spdk_spin_unlock(&g_keyring_spin);
}

void
_accel_crypto_keys_dump_param(struct spdk_json_write_ctx *w)
{
	_accel_crypto_keys_write_config_json(w, false);
}

void
spdk_accel_write_config_json(struct spdk_json_write_ctx *w)
{
	struct spdk_accel_module_if *accel_module;
	int i;

	spdk_json_write_array_begin(w);
	accel_write_options(w);

	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		if (accel_module->write_config_json) {
			accel_module->write_config_json(w);
		}
	}
	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
		if (g_modules_opc_override[i]) {
			accel_write_overridden_opc(w, g_opcode_strings[i], g_modules_opc_override[i]);
		}
	}

	_accel_crypto_keys_write_config_json(w, true);

	spdk_json_write_array_end(w);
}

void
spdk_accel_module_finish(void)
{
	if (!g_accel_module) {
		g_accel_module = TAILQ_FIRST(&spdk_accel_module_list);
	} else {
		g_accel_module = TAILQ_NEXT(g_accel_module, tailq);
	}

	if (!g_accel_module) {
		spdk_spin_destroy(&g_keyring_spin);
		spdk_spin_destroy(&g_stats_lock);
		if (g_accel_domain) {
			spdk_memory_domain_destroy(g_accel_domain);
			g_accel_domain = NULL;
		}
		accel_module_finish_cb();
		return;
	}

	if (g_accel_module->module_fini) {
		spdk_thread_send_msg(spdk_get_thread(), g_accel_module->module_fini, NULL);
	} else {
		spdk_accel_module_finish();
	}
}

static void
accel_io_device_unregister_cb(void *io_device)
{
	struct spdk_accel_crypto_key *key, *key_tmp;
	enum spdk_accel_opcode op;

	spdk_spin_lock(&g_keyring_spin);
	TAILQ_FOREACH_SAFE(key, &g_keyring, link, key_tmp) {
		accel_crypto_key_destroy_unsafe(key);
	}
	spdk_spin_unlock(&g_keyring_spin);

	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
		if (g_modules_opc_override[op] != NULL) {
			free(g_modules_opc_override[op]);
			g_modules_opc_override[op] = NULL;
		}
		g_modules_opc[op].module = NULL;
	}

	spdk_accel_module_finish();
}
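
/*
 * Teardown flow, for reference: spdk_accel_finish() below unregisters the io
 * device; once all channels are released, accel_io_device_unregister_cb()
 * runs and kicks off the spdk_accel_module_finish() chain, which walks the
 * module list one module at a time. Each module's module_fini() is expected
 * to call spdk_accel_module_finish() again once its own teardown completes,
 * e.g. (hypothetical module):
 *
 *	static void
 *	example_module_fini(void *ctx)
 *	{
 *		... release module resources ...
 *		spdk_accel_module_finish();
 *	}
 */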
void
spdk_accel_finish(spdk_accel_fini_cb cb_fn, void *cb_arg)
{
	assert(cb_fn != NULL);

	g_fini_cb_fn = cb_fn;
	g_fini_cb_arg = cb_arg;

	spdk_io_device_unregister(&spdk_accel_module_list, accel_io_device_unregister_cb);
}

static struct spdk_accel_driver *
accel_find_driver(const char *name)
{
	struct spdk_accel_driver *driver;

	TAILQ_FOREACH(driver, &g_accel_drivers, tailq) {
		if (strcmp(driver->name, name) == 0) {
			return driver;
		}
	}

	return NULL;
}

int
spdk_accel_set_driver(const char *name)
{
	struct spdk_accel_driver *driver;

	driver = accel_find_driver(name);
	if (driver == NULL) {
		SPDK_ERRLOG("Couldn't find driver named '%s'\n", name);
		return -ENODEV;
	}

	g_accel_driver = driver;

	return 0;
}

void
spdk_accel_driver_register(struct spdk_accel_driver *driver)
{
	if (accel_find_driver(driver->name)) {
		SPDK_ERRLOG("Driver named '%s' has already been registered\n", driver->name);
		assert(0);
		return;
	}

	TAILQ_INSERT_TAIL(&g_accel_drivers, driver, tailq);
}

int
spdk_accel_set_opts(const struct spdk_accel_opts *opts)
{
	if (opts->size > sizeof(*opts)) {
		return -EINVAL;
	}

	memcpy(&g_opts, opts, opts->size);

	return 0;
}

void
spdk_accel_get_opts(struct spdk_accel_opts *opts)
{
	size_t size = opts->size;

	assert(size <= sizeof(*opts));

	memcpy(opts, &g_opts, spdk_min(sizeof(*opts), size));
	opts->size = size;
}
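
/*
 * The size field makes spdk_accel_opts forward-compatible: callers set
 * opts.size to the size of the struct they were compiled against, and only
 * that many bytes are copied in either direction. Typical read-modify-write
 * usage (sketch; the value below is illustrative):
 *
 *	struct spdk_accel_opts opts = { .size = sizeof(opts) };
 *
 *	spdk_accel_get_opts(&opts);
 *	opts.task_count = 4096;
 *	spdk_accel_set_opts(&opts);
 */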
struct accel_get_stats_ctx {
	struct accel_stats stats;
	accel_get_stats_cb cb_fn;
	void *cb_arg;
};

static void
accel_get_channel_stats_done(struct spdk_io_channel_iter *iter, int status)
{
	struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);

	ctx->cb_fn(&ctx->stats, ctx->cb_arg);
	free(ctx);
}

static void
accel_get_channel_stats(struct spdk_io_channel_iter *iter)
{
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(iter);
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);

	accel_add_stats(&ctx->stats, &accel_ch->stats);
	spdk_for_each_channel_continue(iter, 0);
}

int
accel_get_stats(accel_get_stats_cb cb_fn, void *cb_arg)
{
	struct accel_get_stats_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		return -ENOMEM;
	}

	spdk_spin_lock(&g_stats_lock);
	accel_add_stats(&ctx->stats, &g_stats);
	spdk_spin_unlock(&g_stats_lock);

	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	spdk_for_each_channel(&spdk_accel_module_list, accel_get_channel_stats, ctx,
			      accel_get_channel_stats_done);

	return 0;
}

void
spdk_accel_get_opcode_stats(struct spdk_io_channel *ch, enum spdk_accel_opcode opcode,
			    struct spdk_accel_opcode_stats *stats, size_t size)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);

#define FIELD_OK(field) \
	offsetof(struct spdk_accel_opcode_stats, field) + sizeof(stats->field) <= size

#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		stats->field = value; \
	}

	SET_FIELD(executed, accel_ch->stats.operations[opcode].executed);
	SET_FIELD(failed, accel_ch->stats.operations[opcode].failed);
	SET_FIELD(num_bytes, accel_ch->stats.operations[opcode].num_bytes);

#undef FIELD_OK
#undef SET_FIELD
}

uint8_t
spdk_accel_get_buf_align(enum spdk_accel_opcode opcode,
			 const struct spdk_accel_operation_exec_ctx *ctx)
{
	struct spdk_accel_module_if *module = g_modules_opc[opcode].module;
	struct spdk_accel_opcode_info modinfo = {}, drvinfo = {};

	if (g_accel_driver != NULL && g_accel_driver->get_operation_info != NULL) {
		g_accel_driver->get_operation_info(opcode, ctx, &drvinfo);
	}

	if (module->get_operation_info != NULL) {
		module->get_operation_info(opcode, ctx, &modinfo);
	}

	/* If a driver is set, it'll execute most of the operations, while the rest will usually
	 * fall back to accel_sw, which doesn't have any alignment requirements. However, to be
	 * extra safe, return the max(driver, module) if a driver delegates some operations to a
	 * hardware module. */
	return spdk_max(modinfo.required_alignment, drvinfo.required_alignment);
}

struct spdk_accel_module_if *
spdk_accel_get_module(const char *name)
{
	struct spdk_accel_module_if *module;

	TAILQ_FOREACH(module, &spdk_accel_module_list, tailq) {
		if (strcmp(module->name, name) == 0) {
			return module;
		}
	}

	return NULL;
}

SPDK_LOG_REGISTER_COMPONENT(accel)