1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (C) 2020 Intel Corporation. 3 * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. 4 * All rights reserved. 5 */ 6 7 #include "spdk/stdinc.h" 8 9 #include "spdk/accel_module.h" 10 11 #include "accel_internal.h" 12 13 #include "spdk/dma.h" 14 #include "spdk/env.h" 15 #include "spdk/likely.h" 16 #include "spdk/log.h" 17 #include "spdk/thread.h" 18 #include "spdk/json.h" 19 #include "spdk/crc32.h" 20 #include "spdk/util.h" 21 #include "spdk/hexlify.h" 22 23 /* Accelerator Framework: The following provides a top level 24 * generic API for the accelerator functions defined here. Modules, 25 * such as the one in /module/accel/ioat, supply the implementation 26 * with the exception of the pure software implementation contained 27 * later in this file. 28 */ 29 30 #define ALIGN_4K 0x1000 31 #define MAX_TASKS_PER_CHANNEL 0x800 32 #define ACCEL_SMALL_CACHE_SIZE 128 33 #define ACCEL_LARGE_CACHE_SIZE 16 34 /* Set MSB, so we don't return NULL pointers as buffers */ 35 #define ACCEL_BUFFER_BASE ((void *)(1ull << 63)) 36 #define ACCEL_BUFFER_OFFSET_MASK ((uintptr_t)ACCEL_BUFFER_BASE - 1) 37 38 struct accel_module { 39 struct spdk_accel_module_if *module; 40 bool supports_memory_domains; 41 }; 42 43 /* Largest context size for all accel modules */ 44 static size_t g_max_accel_module_size = sizeof(struct spdk_accel_task); 45 46 static struct spdk_accel_module_if *g_accel_module = NULL; 47 static spdk_accel_fini_cb g_fini_cb_fn = NULL; 48 static void *g_fini_cb_arg = NULL; 49 static bool g_modules_started = false; 50 static struct spdk_memory_domain *g_accel_domain; 51 52 /* Global list of registered accelerator modules */ 53 static TAILQ_HEAD(, spdk_accel_module_if) spdk_accel_module_list = 54 TAILQ_HEAD_INITIALIZER(spdk_accel_module_list); 55 56 /* Crypto keyring */ 57 static TAILQ_HEAD(, spdk_accel_crypto_key) g_keyring = TAILQ_HEAD_INITIALIZER(g_keyring); 58 static struct spdk_spinlock g_keyring_spin; 59 60 /* Global array mapping capabilities to modules */ 61 static struct accel_module g_modules_opc[ACCEL_OPC_LAST] = {}; 62 static char *g_modules_opc_override[ACCEL_OPC_LAST] = {}; 63 TAILQ_HEAD(, spdk_accel_driver) g_accel_drivers = TAILQ_HEAD_INITIALIZER(g_accel_drivers); 64 static struct spdk_accel_driver *g_accel_driver; 65 static struct spdk_accel_opts g_opts = { 66 .small_cache_size = ACCEL_SMALL_CACHE_SIZE, 67 .large_cache_size = ACCEL_LARGE_CACHE_SIZE, 68 .task_count = MAX_TASKS_PER_CHANNEL, 69 .sequence_count = MAX_TASKS_PER_CHANNEL, 70 .buf_count = MAX_TASKS_PER_CHANNEL, 71 }; 72 static struct accel_stats g_stats; 73 static struct spdk_spinlock g_stats_lock; 74 75 static const char *g_opcode_strings[ACCEL_OPC_LAST] = { 76 "copy", "fill", "dualcast", "compare", "crc32c", "copy_crc32c", 77 "compress", "decompress", "encrypt", "decrypt", "xor" 78 }; 79 80 enum accel_sequence_state { 81 ACCEL_SEQUENCE_STATE_INIT, 82 ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF, 83 ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF, 84 ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF, 85 ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF, 86 ACCEL_SEQUENCE_STATE_PULL_DATA, 87 ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA, 88 ACCEL_SEQUENCE_STATE_EXEC_TASK, 89 ACCEL_SEQUENCE_STATE_AWAIT_TASK, 90 ACCEL_SEQUENCE_STATE_COMPLETE_TASK, 91 ACCEL_SEQUENCE_STATE_NEXT_TASK, 92 ACCEL_SEQUENCE_STATE_PUSH_DATA, 93 ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA, 94 ACCEL_SEQUENCE_STATE_DRIVER_EXEC, 95 ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK, 96 ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE, 97 ACCEL_SEQUENCE_STATE_ERROR, 98 ACCEL_SEQUENCE_STATE_MAX, 99 }; 100 101 static const char *g_seq_states[] 102 __attribute__((unused)) = { 103 [ACCEL_SEQUENCE_STATE_INIT] = "init", 104 [ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF] = "check-virtbuf", 105 [ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF] = "await-virtbuf", 106 [ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF] = "check-bouncebuf", 107 [ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF] = "await-bouncebuf", 108 [ACCEL_SEQUENCE_STATE_PULL_DATA] = "pull-data", 109 [ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA] = "await-pull-data", 110 [ACCEL_SEQUENCE_STATE_EXEC_TASK] = "exec-task", 111 [ACCEL_SEQUENCE_STATE_AWAIT_TASK] = "await-task", 112 [ACCEL_SEQUENCE_STATE_COMPLETE_TASK] = "complete-task", 113 [ACCEL_SEQUENCE_STATE_NEXT_TASK] = "next-task", 114 [ACCEL_SEQUENCE_STATE_PUSH_DATA] = "push-data", 115 [ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA] = "await-push-data", 116 [ACCEL_SEQUENCE_STATE_DRIVER_EXEC] = "driver-exec", 117 [ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK] = "driver-await-task", 118 [ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE] = "driver-complete", 119 [ACCEL_SEQUENCE_STATE_ERROR] = "error", 120 [ACCEL_SEQUENCE_STATE_MAX] = "", 121 }; 122 123 #define ACCEL_SEQUENCE_STATE_STRING(s) \ 124 (((s) >= ACCEL_SEQUENCE_STATE_INIT && (s) < ACCEL_SEQUENCE_STATE_MAX) \ 125 ? g_seq_states[s] : "unknown") 126 127 struct accel_buffer { 128 struct spdk_accel_sequence *seq; 129 void *buf; 130 uint64_t len; 131 struct spdk_iobuf_entry iobuf; 132 spdk_accel_sequence_get_buf_cb cb_fn; 133 void *cb_ctx; 134 TAILQ_ENTRY(accel_buffer) link; 135 }; 136 137 struct accel_io_channel { 138 struct spdk_io_channel *module_ch[ACCEL_OPC_LAST]; 139 void *task_pool_base; 140 struct spdk_accel_sequence *seq_pool_base; 141 struct accel_buffer *buf_pool_base; 142 TAILQ_HEAD(, spdk_accel_task) task_pool; 143 TAILQ_HEAD(, spdk_accel_sequence) seq_pool; 144 TAILQ_HEAD(, accel_buffer) buf_pool; 145 struct spdk_iobuf_channel iobuf; 146 struct accel_stats stats; 147 }; 148 149 TAILQ_HEAD(accel_sequence_tasks, spdk_accel_task); 150 151 struct spdk_accel_sequence { 152 struct accel_io_channel *ch; 153 struct accel_sequence_tasks tasks; 154 struct accel_sequence_tasks completed; 155 TAILQ_HEAD(, accel_buffer) bounce_bufs; 156 enum accel_sequence_state state; 157 int status; 158 bool in_process_sequence; 159 spdk_accel_completion_cb cb_fn; 160 void *cb_arg; 161 TAILQ_ENTRY(spdk_accel_sequence) link; 162 }; 163 164 #define accel_update_stats(ch, event, v) \ 165 do { \ 166 (ch)->stats.event += (v); \ 167 } while (0) 168 169 #define accel_update_task_stats(ch, task, event, v) \ 170 accel_update_stats(ch, operations[(task)->op_code].event, v) 171 172 static inline void 173 accel_sequence_set_state(struct spdk_accel_sequence *seq, enum accel_sequence_state state) 174 { 175 SPDK_DEBUGLOG(accel, "seq=%p, setting state: %s -> %s\n", seq, 176 ACCEL_SEQUENCE_STATE_STRING(seq->state), ACCEL_SEQUENCE_STATE_STRING(state)); 177 assert(seq->state != ACCEL_SEQUENCE_STATE_ERROR || state == ACCEL_SEQUENCE_STATE_ERROR); 178 seq->state = state; 179 } 180 181 static void 182 accel_sequence_set_fail(struct spdk_accel_sequence *seq, int status) 183 { 184 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR); 185 assert(status != 0); 186 seq->status = status; 187 } 188 189 int 190 spdk_accel_get_opc_module_name(enum accel_opcode opcode, const char **module_name) 191 { 192 if (opcode >= ACCEL_OPC_LAST) { 193 /* invalid opcode */ 194 return -EINVAL; 195 } 196 197 if (g_modules_opc[opcode].module) { 198 *module_name = g_modules_opc[opcode].module->name; 199 } else { 200 return -ENOENT; 201 } 202 203 return 0; 204 } 205 206 void 207 _accel_for_each_module(struct module_info *info, _accel_for_each_module_fn fn) 208 { 209 struct spdk_accel_module_if *accel_module; 210 enum accel_opcode opcode; 211 int j = 0; 212 213 TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) { 214 for (opcode = 0; opcode < ACCEL_OPC_LAST; opcode++) { 215 if (accel_module->supports_opcode(opcode)) { 216 info->ops[j] = opcode; 217 j++; 218 } 219 } 220 info->name = accel_module->name; 221 info->num_ops = j; 222 fn(info); 223 j = 0; 224 } 225 } 226 227 int 228 _accel_get_opc_name(enum accel_opcode opcode, const char **opcode_name) 229 { 230 int rc = 0; 231 232 if (opcode < ACCEL_OPC_LAST) { 233 *opcode_name = g_opcode_strings[opcode]; 234 } else { 235 /* invalid opcode */ 236 rc = -EINVAL; 237 } 238 239 return rc; 240 } 241 242 int 243 spdk_accel_assign_opc(enum accel_opcode opcode, const char *name) 244 { 245 if (g_modules_started == true) { 246 /* we don't allow re-assignment once things have started */ 247 return -EINVAL; 248 } 249 250 if (opcode >= ACCEL_OPC_LAST) { 251 /* invalid opcode */ 252 return -EINVAL; 253 } 254 255 /* module selection will be validated after the framework starts. */ 256 g_modules_opc_override[opcode] = strdup(name); 257 258 return 0; 259 } 260 261 void 262 spdk_accel_task_complete(struct spdk_accel_task *accel_task, int status) 263 { 264 struct accel_io_channel *accel_ch = accel_task->accel_ch; 265 spdk_accel_completion_cb cb_fn = accel_task->cb_fn; 266 void *cb_arg = accel_task->cb_arg; 267 268 /* We should put the accel_task into the list firstly in order to avoid 269 * the accel task list is exhausted when there is recursive call to 270 * allocate accel_task in user's call back function (cb_fn) 271 */ 272 TAILQ_INSERT_HEAD(&accel_ch->task_pool, accel_task, link); 273 274 accel_update_task_stats(accel_ch, accel_task, executed, 1); 275 accel_update_task_stats(accel_ch, accel_task, num_bytes, accel_task->nbytes); 276 if (spdk_unlikely(status != 0)) { 277 accel_update_task_stats(accel_ch, accel_task, failed, 1); 278 } 279 280 cb_fn(cb_arg, status); 281 } 282 283 inline static struct spdk_accel_task * 284 _get_task(struct accel_io_channel *accel_ch, spdk_accel_completion_cb cb_fn, void *cb_arg) 285 { 286 struct spdk_accel_task *accel_task; 287 288 accel_task = TAILQ_FIRST(&accel_ch->task_pool); 289 if (accel_task == NULL) { 290 return NULL; 291 } 292 293 TAILQ_REMOVE(&accel_ch->task_pool, accel_task, link); 294 accel_task->link.tqe_next = NULL; 295 accel_task->link.tqe_prev = NULL; 296 297 accel_task->cb_fn = cb_fn; 298 accel_task->cb_arg = cb_arg; 299 accel_task->accel_ch = accel_ch; 300 accel_task->bounce.s.orig_iovs = NULL; 301 accel_task->bounce.d.orig_iovs = NULL; 302 303 return accel_task; 304 } 305 306 static inline int 307 accel_submit_task(struct accel_io_channel *accel_ch, struct spdk_accel_task *task) 308 { 309 struct spdk_io_channel *module_ch = accel_ch->module_ch[task->op_code]; 310 struct spdk_accel_module_if *module = g_modules_opc[task->op_code].module; 311 int rc; 312 313 rc = module->submit_tasks(module_ch, task); 314 if (spdk_unlikely(rc != 0)) { 315 accel_update_task_stats(accel_ch, task, failed, 1); 316 } 317 318 return rc; 319 } 320 321 static inline uint64_t 322 accel_get_iovlen(struct iovec *iovs, uint32_t iovcnt) 323 { 324 uint64_t result = 0; 325 uint32_t i; 326 327 for (i = 0; i < iovcnt; ++i) { 328 result += iovs[i].iov_len; 329 } 330 331 return result; 332 } 333 334 /* Accel framework public API for copy function */ 335 int 336 spdk_accel_submit_copy(struct spdk_io_channel *ch, void *dst, void *src, 337 uint64_t nbytes, int flags, spdk_accel_completion_cb cb_fn, void *cb_arg) 338 { 339 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 340 struct spdk_accel_task *accel_task; 341 342 accel_task = _get_task(accel_ch, cb_fn, cb_arg); 343 if (accel_task == NULL) { 344 return -ENOMEM; 345 } 346 347 accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC]; 348 accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST]; 349 accel_task->d.iovs[0].iov_base = dst; 350 accel_task->d.iovs[0].iov_len = nbytes; 351 accel_task->d.iovcnt = 1; 352 accel_task->s.iovs[0].iov_base = src; 353 accel_task->s.iovs[0].iov_len = nbytes; 354 accel_task->s.iovcnt = 1; 355 accel_task->nbytes = nbytes; 356 accel_task->op_code = ACCEL_OPC_COPY; 357 accel_task->flags = flags; 358 accel_task->src_domain = NULL; 359 accel_task->dst_domain = NULL; 360 accel_task->step_cb_fn = NULL; 361 362 return accel_submit_task(accel_ch, accel_task); 363 } 364 365 /* Accel framework public API for dual cast copy function */ 366 int 367 spdk_accel_submit_dualcast(struct spdk_io_channel *ch, void *dst1, 368 void *dst2, void *src, uint64_t nbytes, int flags, 369 spdk_accel_completion_cb cb_fn, void *cb_arg) 370 { 371 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 372 struct spdk_accel_task *accel_task; 373 374 if ((uintptr_t)dst1 & (ALIGN_4K - 1) || (uintptr_t)dst2 & (ALIGN_4K - 1)) { 375 SPDK_ERRLOG("Dualcast requires 4K alignment on dst addresses\n"); 376 return -EINVAL; 377 } 378 379 accel_task = _get_task(accel_ch, cb_fn, cb_arg); 380 if (accel_task == NULL) { 381 return -ENOMEM; 382 } 383 384 accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC]; 385 accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST]; 386 accel_task->d2.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST2]; 387 accel_task->d.iovs[0].iov_base = dst1; 388 accel_task->d.iovs[0].iov_len = nbytes; 389 accel_task->d.iovcnt = 1; 390 accel_task->d2.iovs[0].iov_base = dst2; 391 accel_task->d2.iovs[0].iov_len = nbytes; 392 accel_task->d2.iovcnt = 1; 393 accel_task->s.iovs[0].iov_base = src; 394 accel_task->s.iovs[0].iov_len = nbytes; 395 accel_task->s.iovcnt = 1; 396 accel_task->nbytes = nbytes; 397 accel_task->flags = flags; 398 accel_task->op_code = ACCEL_OPC_DUALCAST; 399 accel_task->src_domain = NULL; 400 accel_task->dst_domain = NULL; 401 accel_task->step_cb_fn = NULL; 402 403 return accel_submit_task(accel_ch, accel_task); 404 } 405 406 /* Accel framework public API for compare function */ 407 int 408 spdk_accel_submit_compare(struct spdk_io_channel *ch, void *src1, 409 void *src2, uint64_t nbytes, spdk_accel_completion_cb cb_fn, 410 void *cb_arg) 411 { 412 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 413 struct spdk_accel_task *accel_task; 414 415 accel_task = _get_task(accel_ch, cb_fn, cb_arg); 416 if (accel_task == NULL) { 417 return -ENOMEM; 418 } 419 420 accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC]; 421 accel_task->s2.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC2]; 422 accel_task->s.iovs[0].iov_base = src1; 423 accel_task->s.iovs[0].iov_len = nbytes; 424 accel_task->s.iovcnt = 1; 425 accel_task->s2.iovs[0].iov_base = src2; 426 accel_task->s2.iovs[0].iov_len = nbytes; 427 accel_task->s2.iovcnt = 1; 428 accel_task->nbytes = nbytes; 429 accel_task->op_code = ACCEL_OPC_COMPARE; 430 accel_task->src_domain = NULL; 431 accel_task->dst_domain = NULL; 432 accel_task->step_cb_fn = NULL; 433 434 return accel_submit_task(accel_ch, accel_task); 435 } 436 437 /* Accel framework public API for fill function */ 438 int 439 spdk_accel_submit_fill(struct spdk_io_channel *ch, void *dst, 440 uint8_t fill, uint64_t nbytes, int flags, 441 spdk_accel_completion_cb cb_fn, void *cb_arg) 442 { 443 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 444 struct spdk_accel_task *accel_task; 445 446 accel_task = _get_task(accel_ch, cb_fn, cb_arg); 447 if (accel_task == NULL) { 448 return -ENOMEM; 449 } 450 451 accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST]; 452 accel_task->d.iovs[0].iov_base = dst; 453 accel_task->d.iovs[0].iov_len = nbytes; 454 accel_task->d.iovcnt = 1; 455 accel_task->nbytes = nbytes; 456 memset(&accel_task->fill_pattern, fill, sizeof(uint64_t)); 457 accel_task->flags = flags; 458 accel_task->op_code = ACCEL_OPC_FILL; 459 accel_task->src_domain = NULL; 460 accel_task->dst_domain = NULL; 461 accel_task->step_cb_fn = NULL; 462 463 return accel_submit_task(accel_ch, accel_task); 464 } 465 466 /* Accel framework public API for CRC-32C function */ 467 int 468 spdk_accel_submit_crc32c(struct spdk_io_channel *ch, uint32_t *crc_dst, 469 void *src, uint32_t seed, uint64_t nbytes, spdk_accel_completion_cb cb_fn, 470 void *cb_arg) 471 { 472 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 473 struct spdk_accel_task *accel_task; 474 475 accel_task = _get_task(accel_ch, cb_fn, cb_arg); 476 if (accel_task == NULL) { 477 return -ENOMEM; 478 } 479 480 accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC]; 481 accel_task->s.iovs[0].iov_base = src; 482 accel_task->s.iovs[0].iov_len = nbytes; 483 accel_task->s.iovcnt = 1; 484 accel_task->nbytes = nbytes; 485 accel_task->crc_dst = crc_dst; 486 accel_task->seed = seed; 487 accel_task->op_code = ACCEL_OPC_CRC32C; 488 accel_task->src_domain = NULL; 489 accel_task->dst_domain = NULL; 490 accel_task->step_cb_fn = NULL; 491 492 return accel_submit_task(accel_ch, accel_task); 493 } 494 495 /* Accel framework public API for chained CRC-32C function */ 496 int 497 spdk_accel_submit_crc32cv(struct spdk_io_channel *ch, uint32_t *crc_dst, 498 struct iovec *iov, uint32_t iov_cnt, uint32_t seed, 499 spdk_accel_completion_cb cb_fn, void *cb_arg) 500 { 501 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 502 struct spdk_accel_task *accel_task; 503 504 if (iov == NULL) { 505 SPDK_ERRLOG("iov should not be NULL"); 506 return -EINVAL; 507 } 508 509 if (!iov_cnt) { 510 SPDK_ERRLOG("iovcnt should not be zero value\n"); 511 return -EINVAL; 512 } 513 514 accel_task = _get_task(accel_ch, cb_fn, cb_arg); 515 if (accel_task == NULL) { 516 SPDK_ERRLOG("no memory\n"); 517 assert(0); 518 return -ENOMEM; 519 } 520 521 accel_task->s.iovs = iov; 522 accel_task->s.iovcnt = iov_cnt; 523 accel_task->nbytes = accel_get_iovlen(iov, iov_cnt); 524 accel_task->crc_dst = crc_dst; 525 accel_task->seed = seed; 526 accel_task->op_code = ACCEL_OPC_CRC32C; 527 accel_task->src_domain = NULL; 528 accel_task->dst_domain = NULL; 529 accel_task->step_cb_fn = NULL; 530 531 return accel_submit_task(accel_ch, accel_task); 532 } 533 534 /* Accel framework public API for copy with CRC-32C function */ 535 int 536 spdk_accel_submit_copy_crc32c(struct spdk_io_channel *ch, void *dst, 537 void *src, uint32_t *crc_dst, uint32_t seed, uint64_t nbytes, 538 int flags, spdk_accel_completion_cb cb_fn, void *cb_arg) 539 { 540 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 541 struct spdk_accel_task *accel_task; 542 543 accel_task = _get_task(accel_ch, cb_fn, cb_arg); 544 if (accel_task == NULL) { 545 return -ENOMEM; 546 } 547 548 accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC]; 549 accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST]; 550 accel_task->d.iovs[0].iov_base = dst; 551 accel_task->d.iovs[0].iov_len = nbytes; 552 accel_task->d.iovcnt = 1; 553 accel_task->s.iovs[0].iov_base = src; 554 accel_task->s.iovs[0].iov_len = nbytes; 555 accel_task->s.iovcnt = 1; 556 accel_task->nbytes = nbytes; 557 accel_task->crc_dst = crc_dst; 558 accel_task->seed = seed; 559 accel_task->flags = flags; 560 accel_task->op_code = ACCEL_OPC_COPY_CRC32C; 561 accel_task->src_domain = NULL; 562 accel_task->dst_domain = NULL; 563 accel_task->step_cb_fn = NULL; 564 565 return accel_submit_task(accel_ch, accel_task); 566 } 567 568 /* Accel framework public API for chained copy + CRC-32C function */ 569 int 570 spdk_accel_submit_copy_crc32cv(struct spdk_io_channel *ch, void *dst, 571 struct iovec *src_iovs, uint32_t iov_cnt, uint32_t *crc_dst, 572 uint32_t seed, int flags, spdk_accel_completion_cb cb_fn, void *cb_arg) 573 { 574 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 575 struct spdk_accel_task *accel_task; 576 uint64_t nbytes; 577 578 if (src_iovs == NULL) { 579 SPDK_ERRLOG("iov should not be NULL"); 580 return -EINVAL; 581 } 582 583 if (!iov_cnt) { 584 SPDK_ERRLOG("iovcnt should not be zero value\n"); 585 return -EINVAL; 586 } 587 588 accel_task = _get_task(accel_ch, cb_fn, cb_arg); 589 if (accel_task == NULL) { 590 SPDK_ERRLOG("no memory\n"); 591 assert(0); 592 return -ENOMEM; 593 } 594 595 nbytes = accel_get_iovlen(src_iovs, iov_cnt); 596 accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST]; 597 accel_task->d.iovs[0].iov_base = dst; 598 accel_task->d.iovs[0].iov_len = nbytes; 599 accel_task->d.iovcnt = 1; 600 accel_task->s.iovs = src_iovs; 601 accel_task->s.iovcnt = iov_cnt; 602 accel_task->nbytes = nbytes; 603 accel_task->crc_dst = crc_dst; 604 accel_task->seed = seed; 605 accel_task->flags = flags; 606 accel_task->op_code = ACCEL_OPC_COPY_CRC32C; 607 accel_task->src_domain = NULL; 608 accel_task->dst_domain = NULL; 609 accel_task->step_cb_fn = NULL; 610 611 return accel_submit_task(accel_ch, accel_task); 612 } 613 614 int 615 spdk_accel_submit_compress(struct spdk_io_channel *ch, void *dst, uint64_t nbytes, 616 struct iovec *src_iovs, size_t src_iovcnt, uint32_t *output_size, int flags, 617 spdk_accel_completion_cb cb_fn, void *cb_arg) 618 { 619 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 620 struct spdk_accel_task *accel_task; 621 622 accel_task = _get_task(accel_ch, cb_fn, cb_arg); 623 if (accel_task == NULL) { 624 return -ENOMEM; 625 } 626 627 accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST]; 628 accel_task->d.iovs[0].iov_base = dst; 629 accel_task->d.iovs[0].iov_len = nbytes; 630 accel_task->d.iovcnt = 1; 631 accel_task->output_size = output_size; 632 accel_task->s.iovs = src_iovs; 633 accel_task->s.iovcnt = src_iovcnt; 634 accel_task->nbytes = nbytes; 635 accel_task->flags = flags; 636 accel_task->op_code = ACCEL_OPC_COMPRESS; 637 accel_task->src_domain = NULL; 638 accel_task->dst_domain = NULL; 639 accel_task->step_cb_fn = NULL; 640 641 return accel_submit_task(accel_ch, accel_task); 642 } 643 644 int 645 spdk_accel_submit_decompress(struct spdk_io_channel *ch, struct iovec *dst_iovs, 646 size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt, 647 uint32_t *output_size, int flags, spdk_accel_completion_cb cb_fn, 648 void *cb_arg) 649 { 650 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 651 struct spdk_accel_task *accel_task; 652 653 accel_task = _get_task(accel_ch, cb_fn, cb_arg); 654 if (accel_task == NULL) { 655 return -ENOMEM; 656 } 657 658 accel_task->output_size = output_size; 659 accel_task->s.iovs = src_iovs; 660 accel_task->s.iovcnt = src_iovcnt; 661 accel_task->d.iovs = dst_iovs; 662 accel_task->d.iovcnt = dst_iovcnt; 663 accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt); 664 accel_task->flags = flags; 665 accel_task->op_code = ACCEL_OPC_DECOMPRESS; 666 accel_task->src_domain = NULL; 667 accel_task->dst_domain = NULL; 668 accel_task->step_cb_fn = NULL; 669 670 return accel_submit_task(accel_ch, accel_task); 671 } 672 673 int 674 spdk_accel_submit_encrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key, 675 struct iovec *dst_iovs, uint32_t dst_iovcnt, 676 struct iovec *src_iovs, uint32_t src_iovcnt, 677 uint64_t iv, uint32_t block_size, int flags, 678 spdk_accel_completion_cb cb_fn, void *cb_arg) 679 { 680 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 681 struct spdk_accel_task *accel_task; 682 683 if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) { 684 return -EINVAL; 685 } 686 687 accel_task = _get_task(accel_ch, cb_fn, cb_arg); 688 if (accel_task == NULL) { 689 return -ENOMEM; 690 } 691 692 accel_task->crypto_key = key; 693 accel_task->s.iovs = src_iovs; 694 accel_task->s.iovcnt = src_iovcnt; 695 accel_task->d.iovs = dst_iovs; 696 accel_task->d.iovcnt = dst_iovcnt; 697 accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt); 698 accel_task->iv = iv; 699 accel_task->block_size = block_size; 700 accel_task->flags = flags; 701 accel_task->op_code = ACCEL_OPC_ENCRYPT; 702 accel_task->src_domain = NULL; 703 accel_task->dst_domain = NULL; 704 accel_task->step_cb_fn = NULL; 705 706 return accel_submit_task(accel_ch, accel_task); 707 } 708 709 int 710 spdk_accel_submit_decrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key, 711 struct iovec *dst_iovs, uint32_t dst_iovcnt, 712 struct iovec *src_iovs, uint32_t src_iovcnt, 713 uint64_t iv, uint32_t block_size, int flags, 714 spdk_accel_completion_cb cb_fn, void *cb_arg) 715 { 716 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 717 struct spdk_accel_task *accel_task; 718 719 if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) { 720 return -EINVAL; 721 } 722 723 accel_task = _get_task(accel_ch, cb_fn, cb_arg); 724 if (accel_task == NULL) { 725 return -ENOMEM; 726 } 727 728 accel_task->crypto_key = key; 729 accel_task->s.iovs = src_iovs; 730 accel_task->s.iovcnt = src_iovcnt; 731 accel_task->d.iovs = dst_iovs; 732 accel_task->d.iovcnt = dst_iovcnt; 733 accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt); 734 accel_task->iv = iv; 735 accel_task->block_size = block_size; 736 accel_task->flags = flags; 737 accel_task->op_code = ACCEL_OPC_DECRYPT; 738 accel_task->src_domain = NULL; 739 accel_task->dst_domain = NULL; 740 accel_task->step_cb_fn = NULL; 741 742 return accel_submit_task(accel_ch, accel_task); 743 } 744 745 int 746 spdk_accel_submit_xor(struct spdk_io_channel *ch, void *dst, void **sources, uint32_t nsrcs, 747 uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg) 748 { 749 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 750 struct spdk_accel_task *accel_task; 751 752 accel_task = _get_task(accel_ch, cb_fn, cb_arg); 753 if (accel_task == NULL) { 754 return -ENOMEM; 755 } 756 757 accel_task->nsrcs.srcs = sources; 758 accel_task->nsrcs.cnt = nsrcs; 759 accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST]; 760 accel_task->d.iovs[0].iov_base = dst; 761 accel_task->d.iovs[0].iov_len = nbytes; 762 accel_task->d.iovcnt = 1; 763 accel_task->nbytes = nbytes; 764 accel_task->op_code = ACCEL_OPC_XOR; 765 accel_task->src_domain = NULL; 766 accel_task->dst_domain = NULL; 767 accel_task->step_cb_fn = NULL; 768 769 return accel_submit_task(accel_ch, accel_task); 770 } 771 772 static inline struct accel_buffer * 773 accel_get_buf(struct accel_io_channel *ch, uint64_t len) 774 { 775 struct accel_buffer *buf; 776 777 buf = TAILQ_FIRST(&ch->buf_pool); 778 if (spdk_unlikely(buf == NULL)) { 779 return NULL; 780 } 781 782 TAILQ_REMOVE(&ch->buf_pool, buf, link); 783 buf->len = len; 784 buf->buf = NULL; 785 buf->seq = NULL; 786 buf->cb_fn = NULL; 787 788 return buf; 789 } 790 791 static inline void 792 accel_put_buf(struct accel_io_channel *ch, struct accel_buffer *buf) 793 { 794 if (buf->buf != NULL) { 795 spdk_iobuf_put(&ch->iobuf, buf->buf, buf->len); 796 } 797 798 TAILQ_INSERT_HEAD(&ch->buf_pool, buf, link); 799 } 800 801 static inline struct spdk_accel_sequence * 802 accel_sequence_get(struct accel_io_channel *ch) 803 { 804 struct spdk_accel_sequence *seq; 805 806 seq = TAILQ_FIRST(&ch->seq_pool); 807 if (seq == NULL) { 808 return NULL; 809 } 810 811 TAILQ_REMOVE(&ch->seq_pool, seq, link); 812 813 TAILQ_INIT(&seq->tasks); 814 TAILQ_INIT(&seq->completed); 815 TAILQ_INIT(&seq->bounce_bufs); 816 817 seq->ch = ch; 818 seq->status = 0; 819 seq->state = ACCEL_SEQUENCE_STATE_INIT; 820 seq->in_process_sequence = false; 821 822 return seq; 823 } 824 825 static inline void 826 accel_sequence_put(struct spdk_accel_sequence *seq) 827 { 828 struct accel_io_channel *ch = seq->ch; 829 struct accel_buffer *buf; 830 831 while (!TAILQ_EMPTY(&seq->bounce_bufs)) { 832 buf = TAILQ_FIRST(&seq->bounce_bufs); 833 TAILQ_REMOVE(&seq->bounce_bufs, buf, link); 834 accel_put_buf(seq->ch, buf); 835 } 836 837 assert(TAILQ_EMPTY(&seq->tasks)); 838 assert(TAILQ_EMPTY(&seq->completed)); 839 seq->ch = NULL; 840 841 TAILQ_INSERT_HEAD(&ch->seq_pool, seq, link); 842 } 843 844 static void accel_sequence_task_cb(void *cb_arg, int status); 845 846 static inline struct spdk_accel_task * 847 accel_sequence_get_task(struct accel_io_channel *ch, struct spdk_accel_sequence *seq, 848 spdk_accel_step_cb cb_fn, void *cb_arg) 849 { 850 struct spdk_accel_task *task; 851 852 task = _get_task(ch, accel_sequence_task_cb, seq); 853 if (task == NULL) { 854 return task; 855 } 856 857 task->step_cb_fn = cb_fn; 858 task->step_cb_arg = cb_arg; 859 860 return task; 861 } 862 863 int 864 spdk_accel_append_copy(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch, 865 struct iovec *dst_iovs, uint32_t dst_iovcnt, 866 struct spdk_memory_domain *dst_domain, void *dst_domain_ctx, 867 struct iovec *src_iovs, uint32_t src_iovcnt, 868 struct spdk_memory_domain *src_domain, void *src_domain_ctx, 869 int flags, spdk_accel_step_cb cb_fn, void *cb_arg) 870 { 871 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 872 struct spdk_accel_task *task; 873 struct spdk_accel_sequence *seq = *pseq; 874 875 if (seq == NULL) { 876 seq = accel_sequence_get(accel_ch); 877 if (spdk_unlikely(seq == NULL)) { 878 return -ENOMEM; 879 } 880 } 881 882 assert(seq->ch == accel_ch); 883 task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg); 884 if (spdk_unlikely(task == NULL)) { 885 if (*pseq == NULL) { 886 accel_sequence_put(seq); 887 } 888 889 return -ENOMEM; 890 } 891 892 task->dst_domain = dst_domain; 893 task->dst_domain_ctx = dst_domain_ctx; 894 task->d.iovs = dst_iovs; 895 task->d.iovcnt = dst_iovcnt; 896 task->src_domain = src_domain; 897 task->src_domain_ctx = src_domain_ctx; 898 task->s.iovs = src_iovs; 899 task->s.iovcnt = src_iovcnt; 900 task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt); 901 task->flags = flags; 902 task->op_code = ACCEL_OPC_COPY; 903 904 TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link); 905 *pseq = seq; 906 907 return 0; 908 } 909 910 int 911 spdk_accel_append_fill(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch, 912 void *buf, uint64_t len, 913 struct spdk_memory_domain *domain, void *domain_ctx, uint8_t pattern, 914 int flags, spdk_accel_step_cb cb_fn, void *cb_arg) 915 { 916 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 917 struct spdk_accel_task *task; 918 struct spdk_accel_sequence *seq = *pseq; 919 920 if (seq == NULL) { 921 seq = accel_sequence_get(accel_ch); 922 if (spdk_unlikely(seq == NULL)) { 923 return -ENOMEM; 924 } 925 } 926 927 assert(seq->ch == accel_ch); 928 task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg); 929 if (spdk_unlikely(task == NULL)) { 930 if (*pseq == NULL) { 931 accel_sequence_put(seq); 932 } 933 934 return -ENOMEM; 935 } 936 937 memset(&task->fill_pattern, pattern, sizeof(uint64_t)); 938 939 task->d.iovs = &task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST]; 940 task->d.iovs[0].iov_base = buf; 941 task->d.iovs[0].iov_len = len; 942 task->d.iovcnt = 1; 943 task->nbytes = len; 944 task->src_domain = NULL; 945 task->dst_domain = domain; 946 task->dst_domain_ctx = domain_ctx; 947 task->flags = flags; 948 task->op_code = ACCEL_OPC_FILL; 949 950 TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link); 951 *pseq = seq; 952 953 return 0; 954 } 955 956 int 957 spdk_accel_append_decompress(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch, 958 struct iovec *dst_iovs, size_t dst_iovcnt, 959 struct spdk_memory_domain *dst_domain, void *dst_domain_ctx, 960 struct iovec *src_iovs, size_t src_iovcnt, 961 struct spdk_memory_domain *src_domain, void *src_domain_ctx, 962 int flags, spdk_accel_step_cb cb_fn, void *cb_arg) 963 { 964 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 965 struct spdk_accel_task *task; 966 struct spdk_accel_sequence *seq = *pseq; 967 968 if (seq == NULL) { 969 seq = accel_sequence_get(accel_ch); 970 if (spdk_unlikely(seq == NULL)) { 971 return -ENOMEM; 972 } 973 } 974 975 assert(seq->ch == accel_ch); 976 task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg); 977 if (spdk_unlikely(task == NULL)) { 978 if (*pseq == NULL) { 979 accel_sequence_put(seq); 980 } 981 982 return -ENOMEM; 983 } 984 985 /* TODO: support output_size for chaining */ 986 task->output_size = NULL; 987 task->dst_domain = dst_domain; 988 task->dst_domain_ctx = dst_domain_ctx; 989 task->d.iovs = dst_iovs; 990 task->d.iovcnt = dst_iovcnt; 991 task->src_domain = src_domain; 992 task->src_domain_ctx = src_domain_ctx; 993 task->s.iovs = src_iovs; 994 task->s.iovcnt = src_iovcnt; 995 task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt); 996 task->flags = flags; 997 task->op_code = ACCEL_OPC_DECOMPRESS; 998 999 TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link); 1000 *pseq = seq; 1001 1002 return 0; 1003 } 1004 1005 int 1006 spdk_accel_append_encrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch, 1007 struct spdk_accel_crypto_key *key, 1008 struct iovec *dst_iovs, uint32_t dst_iovcnt, 1009 struct spdk_memory_domain *dst_domain, void *dst_domain_ctx, 1010 struct iovec *src_iovs, uint32_t src_iovcnt, 1011 struct spdk_memory_domain *src_domain, void *src_domain_ctx, 1012 uint64_t iv, uint32_t block_size, int flags, 1013 spdk_accel_step_cb cb_fn, void *cb_arg) 1014 { 1015 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 1016 struct spdk_accel_task *task; 1017 struct spdk_accel_sequence *seq = *pseq; 1018 1019 if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || 1020 !block_size)) { 1021 return -EINVAL; 1022 } 1023 1024 if (seq == NULL) { 1025 seq = accel_sequence_get(accel_ch); 1026 if (spdk_unlikely(seq == NULL)) { 1027 return -ENOMEM; 1028 } 1029 } 1030 1031 assert(seq->ch == accel_ch); 1032 task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg); 1033 if (spdk_unlikely(task == NULL)) { 1034 if (*pseq == NULL) { 1035 accel_sequence_put(seq); 1036 } 1037 1038 return -ENOMEM; 1039 } 1040 1041 task->crypto_key = key; 1042 task->src_domain = src_domain; 1043 task->src_domain_ctx = src_domain_ctx; 1044 task->s.iovs = src_iovs; 1045 task->s.iovcnt = src_iovcnt; 1046 task->dst_domain = dst_domain; 1047 task->dst_domain_ctx = dst_domain_ctx; 1048 task->d.iovs = dst_iovs; 1049 task->d.iovcnt = dst_iovcnt; 1050 task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt); 1051 task->iv = iv; 1052 task->block_size = block_size; 1053 task->flags = flags; 1054 task->op_code = ACCEL_OPC_ENCRYPT; 1055 1056 TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link); 1057 *pseq = seq; 1058 1059 return 0; 1060 } 1061 1062 int 1063 spdk_accel_append_decrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch, 1064 struct spdk_accel_crypto_key *key, 1065 struct iovec *dst_iovs, uint32_t dst_iovcnt, 1066 struct spdk_memory_domain *dst_domain, void *dst_domain_ctx, 1067 struct iovec *src_iovs, uint32_t src_iovcnt, 1068 struct spdk_memory_domain *src_domain, void *src_domain_ctx, 1069 uint64_t iv, uint32_t block_size, int flags, 1070 spdk_accel_step_cb cb_fn, void *cb_arg) 1071 { 1072 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 1073 struct spdk_accel_task *task; 1074 struct spdk_accel_sequence *seq = *pseq; 1075 1076 if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || 1077 !block_size)) { 1078 return -EINVAL; 1079 } 1080 1081 if (seq == NULL) { 1082 seq = accel_sequence_get(accel_ch); 1083 if (spdk_unlikely(seq == NULL)) { 1084 return -ENOMEM; 1085 } 1086 } 1087 1088 assert(seq->ch == accel_ch); 1089 task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg); 1090 if (spdk_unlikely(task == NULL)) { 1091 if (*pseq == NULL) { 1092 accel_sequence_put(seq); 1093 } 1094 1095 return -ENOMEM; 1096 } 1097 1098 task->crypto_key = key; 1099 task->src_domain = src_domain; 1100 task->src_domain_ctx = src_domain_ctx; 1101 task->s.iovs = src_iovs; 1102 task->s.iovcnt = src_iovcnt; 1103 task->dst_domain = dst_domain; 1104 task->dst_domain_ctx = dst_domain_ctx; 1105 task->d.iovs = dst_iovs; 1106 task->d.iovcnt = dst_iovcnt; 1107 task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt); 1108 task->iv = iv; 1109 task->block_size = block_size; 1110 task->flags = flags; 1111 task->op_code = ACCEL_OPC_DECRYPT; 1112 1113 TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link); 1114 *pseq = seq; 1115 1116 return 0; 1117 } 1118 1119 int 1120 spdk_accel_append_crc32c(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch, 1121 uint32_t *dst, struct iovec *iovs, uint32_t iovcnt, 1122 struct spdk_memory_domain *domain, void *domain_ctx, 1123 uint32_t seed, spdk_accel_step_cb cb_fn, void *cb_arg) 1124 { 1125 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 1126 struct spdk_accel_task *task; 1127 struct spdk_accel_sequence *seq = *pseq; 1128 1129 if (seq == NULL) { 1130 seq = accel_sequence_get(accel_ch); 1131 if (spdk_unlikely(seq == NULL)) { 1132 return -ENOMEM; 1133 } 1134 } 1135 1136 assert(seq->ch == accel_ch); 1137 task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg); 1138 if (spdk_unlikely(task == NULL)) { 1139 if (*pseq == NULL) { 1140 accel_sequence_put(seq); 1141 } 1142 1143 return -ENOMEM; 1144 } 1145 1146 task->s.iovs = iovs; 1147 task->s.iovcnt = iovcnt; 1148 task->src_domain = domain; 1149 task->src_domain_ctx = domain_ctx; 1150 task->nbytes = accel_get_iovlen(iovs, iovcnt); 1151 task->crc_dst = dst; 1152 task->seed = seed; 1153 task->op_code = ACCEL_OPC_CRC32C; 1154 task->dst_domain = NULL; 1155 1156 TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link); 1157 *pseq = seq; 1158 1159 return 0; 1160 } 1161 1162 int 1163 spdk_accel_get_buf(struct spdk_io_channel *ch, uint64_t len, void **buf, 1164 struct spdk_memory_domain **domain, void **domain_ctx) 1165 { 1166 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 1167 struct accel_buffer *accel_buf; 1168 1169 accel_buf = accel_get_buf(accel_ch, len); 1170 if (spdk_unlikely(accel_buf == NULL)) { 1171 return -ENOMEM; 1172 } 1173 1174 /* We always return the same pointer and identify the buffers through domain_ctx */ 1175 *buf = ACCEL_BUFFER_BASE; 1176 *domain_ctx = accel_buf; 1177 *domain = g_accel_domain; 1178 1179 return 0; 1180 } 1181 1182 void 1183 spdk_accel_put_buf(struct spdk_io_channel *ch, void *buf, 1184 struct spdk_memory_domain *domain, void *domain_ctx) 1185 { 1186 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 1187 struct accel_buffer *accel_buf = domain_ctx; 1188 1189 assert(domain == g_accel_domain); 1190 assert(buf == ACCEL_BUFFER_BASE); 1191 1192 accel_put_buf(accel_ch, accel_buf); 1193 } 1194 1195 static void 1196 accel_sequence_complete_tasks(struct spdk_accel_sequence *seq) 1197 { 1198 struct spdk_accel_task *task; 1199 struct accel_io_channel *ch = seq->ch; 1200 spdk_accel_step_cb cb_fn; 1201 void *cb_arg; 1202 1203 while (!TAILQ_EMPTY(&seq->completed)) { 1204 task = TAILQ_FIRST(&seq->completed); 1205 TAILQ_REMOVE(&seq->completed, task, seq_link); 1206 cb_fn = task->step_cb_fn; 1207 cb_arg = task->step_cb_arg; 1208 TAILQ_INSERT_HEAD(&ch->task_pool, task, link); 1209 if (cb_fn != NULL) { 1210 cb_fn(cb_arg); 1211 } 1212 } 1213 1214 while (!TAILQ_EMPTY(&seq->tasks)) { 1215 task = TAILQ_FIRST(&seq->tasks); 1216 TAILQ_REMOVE(&seq->tasks, task, seq_link); 1217 cb_fn = task->step_cb_fn; 1218 cb_arg = task->step_cb_arg; 1219 TAILQ_INSERT_HEAD(&ch->task_pool, task, link); 1220 if (cb_fn != NULL) { 1221 cb_fn(cb_arg); 1222 } 1223 } 1224 } 1225 1226 static void 1227 accel_sequence_complete(struct spdk_accel_sequence *seq) 1228 { 1229 SPDK_DEBUGLOG(accel, "Completed sequence: %p with status: %d\n", seq, seq->status); 1230 1231 accel_update_stats(seq->ch, sequence_executed, 1); 1232 if (spdk_unlikely(seq->status != 0)) { 1233 accel_update_stats(seq->ch, sequence_failed, 1); 1234 } 1235 1236 /* First notify all users that appended operations to this sequence */ 1237 accel_sequence_complete_tasks(seq); 1238 1239 /* Then notify the user that finished the sequence */ 1240 seq->cb_fn(seq->cb_arg, seq->status); 1241 1242 accel_sequence_put(seq); 1243 } 1244 1245 static void 1246 accel_update_virt_iov(struct iovec *diov, struct iovec *siov, struct accel_buffer *accel_buf) 1247 { 1248 uintptr_t offset; 1249 1250 offset = (uintptr_t)siov->iov_base & ACCEL_BUFFER_OFFSET_MASK; 1251 assert(offset < accel_buf->len); 1252 1253 diov->iov_base = (char *)accel_buf->buf + offset; 1254 diov->iov_len = siov->iov_len; 1255 } 1256 1257 static void 1258 accel_sequence_set_virtbuf(struct spdk_accel_sequence *seq, struct accel_buffer *buf) 1259 { 1260 struct spdk_accel_task *task; 1261 struct iovec *iov; 1262 1263 /* Now that we've allocated the actual data buffer for this accel_buffer, update all tasks 1264 * in a sequence that were using it. 1265 */ 1266 TAILQ_FOREACH(task, &seq->tasks, seq_link) { 1267 if (task->src_domain == g_accel_domain && task->src_domain_ctx == buf) { 1268 iov = &task->aux_iovs[SPDK_ACCEL_AXU_IOV_VIRT_SRC]; 1269 assert(task->s.iovcnt == 1); 1270 accel_update_virt_iov(iov, &task->s.iovs[0], buf); 1271 task->src_domain = NULL; 1272 task->s.iovs = iov; 1273 } 1274 if (task->dst_domain == g_accel_domain && task->dst_domain_ctx == buf) { 1275 iov = &task->aux_iovs[SPDK_ACCEL_AXU_IOV_VIRT_DST]; 1276 assert(task->d.iovcnt == 1); 1277 accel_update_virt_iov(iov, &task->d.iovs[0], buf); 1278 task->dst_domain = NULL; 1279 task->d.iovs = iov; 1280 } 1281 } 1282 } 1283 1284 static void accel_process_sequence(struct spdk_accel_sequence *seq); 1285 1286 static void 1287 accel_iobuf_get_virtbuf_cb(struct spdk_iobuf_entry *entry, void *buf) 1288 { 1289 struct accel_buffer *accel_buf; 1290 1291 accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf); 1292 1293 assert(accel_buf->seq != NULL); 1294 assert(accel_buf->buf == NULL); 1295 accel_buf->buf = buf; 1296 1297 assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF); 1298 accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF); 1299 accel_sequence_set_virtbuf(accel_buf->seq, accel_buf); 1300 accel_process_sequence(accel_buf->seq); 1301 } 1302 1303 static bool 1304 accel_sequence_alloc_buf(struct spdk_accel_sequence *seq, struct accel_buffer *buf, 1305 spdk_iobuf_get_cb cb_fn) 1306 { 1307 struct accel_io_channel *ch = seq->ch; 1308 1309 assert(buf->buf == NULL); 1310 assert(buf->seq == NULL); 1311 1312 buf->seq = seq; 1313 buf->buf = spdk_iobuf_get(&ch->iobuf, buf->len, &buf->iobuf, cb_fn); 1314 if (buf->buf == NULL) { 1315 return false; 1316 } 1317 1318 return true; 1319 } 1320 1321 static bool 1322 accel_sequence_check_virtbuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task) 1323 { 1324 /* If a task doesn't have dst/src (e.g. fill, crc32), its dst/src domain should be set to 1325 * NULL */ 1326 if (task->src_domain == g_accel_domain) { 1327 if (!accel_sequence_alloc_buf(seq, task->src_domain_ctx, 1328 accel_iobuf_get_virtbuf_cb)) { 1329 return false; 1330 } 1331 1332 accel_sequence_set_virtbuf(seq, task->src_domain_ctx); 1333 } 1334 1335 if (task->dst_domain == g_accel_domain) { 1336 if (!accel_sequence_alloc_buf(seq, task->dst_domain_ctx, 1337 accel_iobuf_get_virtbuf_cb)) { 1338 return false; 1339 } 1340 1341 accel_sequence_set_virtbuf(seq, task->dst_domain_ctx); 1342 } 1343 1344 return true; 1345 } 1346 1347 static void 1348 accel_sequence_get_buf_cb(struct spdk_iobuf_entry *entry, void *buf) 1349 { 1350 struct accel_buffer *accel_buf; 1351 1352 accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf); 1353 1354 assert(accel_buf->seq != NULL); 1355 assert(accel_buf->buf == NULL); 1356 accel_buf->buf = buf; 1357 1358 accel_sequence_set_virtbuf(accel_buf->seq, accel_buf); 1359 accel_buf->cb_fn(accel_buf->seq, accel_buf->cb_ctx); 1360 } 1361 1362 bool 1363 spdk_accel_alloc_sequence_buf(struct spdk_accel_sequence *seq, void *buf, 1364 struct spdk_memory_domain *domain, void *domain_ctx, 1365 spdk_accel_sequence_get_buf_cb cb_fn, void *cb_ctx) 1366 { 1367 struct accel_buffer *accel_buf = domain_ctx; 1368 1369 assert(domain == g_accel_domain); 1370 accel_buf->cb_fn = cb_fn; 1371 accel_buf->cb_ctx = cb_ctx; 1372 1373 if (!accel_sequence_alloc_buf(seq, accel_buf, accel_sequence_get_buf_cb)) { 1374 return false; 1375 } 1376 1377 accel_sequence_set_virtbuf(seq, accel_buf); 1378 1379 return true; 1380 } 1381 1382 struct spdk_accel_task * 1383 spdk_accel_sequence_first_task(struct spdk_accel_sequence *seq) 1384 { 1385 return TAILQ_FIRST(&seq->tasks); 1386 } 1387 1388 struct spdk_accel_task * 1389 spdk_accel_sequence_next_task(struct spdk_accel_task *task) 1390 { 1391 return TAILQ_NEXT(task, seq_link); 1392 } 1393 1394 static inline void 1395 accel_set_bounce_buffer(struct spdk_accel_bounce_buffer *bounce, struct iovec **iovs, 1396 uint32_t *iovcnt, struct spdk_memory_domain **domain, void **domain_ctx, 1397 struct accel_buffer *buf) 1398 { 1399 bounce->orig_iovs = *iovs; 1400 bounce->orig_iovcnt = *iovcnt; 1401 bounce->orig_domain = *domain; 1402 bounce->orig_domain_ctx = *domain_ctx; 1403 bounce->iov.iov_base = buf->buf; 1404 bounce->iov.iov_len = buf->len; 1405 1406 *iovs = &bounce->iov; 1407 *iovcnt = 1; 1408 *domain = NULL; 1409 } 1410 1411 static void 1412 accel_iobuf_get_src_bounce_cb(struct spdk_iobuf_entry *entry, void *buf) 1413 { 1414 struct spdk_accel_task *task; 1415 struct accel_buffer *accel_buf; 1416 1417 accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf); 1418 assert(accel_buf->buf == NULL); 1419 accel_buf->buf = buf; 1420 1421 task = TAILQ_FIRST(&accel_buf->seq->tasks); 1422 assert(task != NULL); 1423 1424 assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF); 1425 accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF); 1426 accel_set_bounce_buffer(&task->bounce.s, &task->s.iovs, &task->s.iovcnt, &task->src_domain, 1427 &task->src_domain_ctx, accel_buf); 1428 accel_process_sequence(accel_buf->seq); 1429 } 1430 1431 static void 1432 accel_iobuf_get_dst_bounce_cb(struct spdk_iobuf_entry *entry, void *buf) 1433 { 1434 struct spdk_accel_task *task; 1435 struct accel_buffer *accel_buf; 1436 1437 accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf); 1438 assert(accel_buf->buf == NULL); 1439 accel_buf->buf = buf; 1440 1441 task = TAILQ_FIRST(&accel_buf->seq->tasks); 1442 assert(task != NULL); 1443 1444 assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF); 1445 accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF); 1446 accel_set_bounce_buffer(&task->bounce.d, &task->d.iovs, &task->d.iovcnt, &task->dst_domain, 1447 &task->dst_domain_ctx, accel_buf); 1448 accel_process_sequence(accel_buf->seq); 1449 } 1450 1451 static int 1452 accel_sequence_check_bouncebuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task) 1453 { 1454 struct accel_buffer *buf; 1455 1456 if (task->src_domain != NULL) { 1457 /* By the time we're here, accel buffers should have been allocated */ 1458 assert(task->src_domain != g_accel_domain); 1459 1460 buf = accel_get_buf(seq->ch, accel_get_iovlen(task->s.iovs, task->s.iovcnt)); 1461 if (buf == NULL) { 1462 SPDK_ERRLOG("Couldn't allocate buffer descriptor\n"); 1463 return -ENOMEM; 1464 } 1465 1466 TAILQ_INSERT_TAIL(&seq->bounce_bufs, buf, link); 1467 if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_src_bounce_cb)) { 1468 return -EAGAIN; 1469 } 1470 1471 accel_set_bounce_buffer(&task->bounce.s, &task->s.iovs, &task->s.iovcnt, 1472 &task->src_domain, &task->src_domain_ctx, buf); 1473 } 1474 1475 if (task->dst_domain != NULL) { 1476 /* By the time we're here, accel buffers should have been allocated */ 1477 assert(task->dst_domain != g_accel_domain); 1478 1479 buf = accel_get_buf(seq->ch, accel_get_iovlen(task->d.iovs, task->d.iovcnt)); 1480 if (buf == NULL) { 1481 /* The src buffer will be released when a sequence is completed */ 1482 SPDK_ERRLOG("Couldn't allocate buffer descriptor\n"); 1483 return -ENOMEM; 1484 } 1485 1486 TAILQ_INSERT_TAIL(&seq->bounce_bufs, buf, link); 1487 if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_dst_bounce_cb)) { 1488 return -EAGAIN; 1489 } 1490 1491 accel_set_bounce_buffer(&task->bounce.d, &task->d.iovs, &task->d.iovcnt, 1492 &task->dst_domain, &task->dst_domain_ctx, buf); 1493 } 1494 1495 return 0; 1496 } 1497 1498 static void 1499 accel_task_pull_data_cb(void *ctx, int status) 1500 { 1501 struct spdk_accel_sequence *seq = ctx; 1502 1503 assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA); 1504 if (spdk_likely(status == 0)) { 1505 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK); 1506 } else { 1507 accel_sequence_set_fail(seq, status); 1508 } 1509 1510 accel_process_sequence(seq); 1511 } 1512 1513 static void 1514 accel_task_pull_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task) 1515 { 1516 int rc; 1517 1518 assert(task->bounce.s.orig_iovs != NULL); 1519 assert(task->bounce.s.orig_domain != NULL); 1520 assert(task->bounce.s.orig_domain != g_accel_domain); 1521 assert(!g_modules_opc[task->op_code].supports_memory_domains); 1522 1523 rc = spdk_memory_domain_pull_data(task->bounce.s.orig_domain, 1524 task->bounce.s.orig_domain_ctx, 1525 task->bounce.s.orig_iovs, task->bounce.s.orig_iovcnt, 1526 task->s.iovs, task->s.iovcnt, 1527 accel_task_pull_data_cb, seq); 1528 if (spdk_unlikely(rc != 0)) { 1529 SPDK_ERRLOG("Failed to pull data from memory domain: %s, rc: %d\n", 1530 spdk_memory_domain_get_dma_device_id(task->bounce.s.orig_domain), rc); 1531 accel_sequence_set_fail(seq, rc); 1532 } 1533 } 1534 1535 static void 1536 accel_task_push_data_cb(void *ctx, int status) 1537 { 1538 struct spdk_accel_sequence *seq = ctx; 1539 1540 assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA); 1541 if (spdk_likely(status == 0)) { 1542 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK); 1543 } else { 1544 accel_sequence_set_fail(seq, status); 1545 } 1546 1547 accel_process_sequence(seq); 1548 } 1549 1550 static void 1551 accel_task_push_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task) 1552 { 1553 int rc; 1554 1555 assert(task->bounce.d.orig_iovs != NULL); 1556 assert(task->bounce.d.orig_domain != NULL); 1557 assert(task->bounce.d.orig_domain != g_accel_domain); 1558 assert(!g_modules_opc[task->op_code].supports_memory_domains); 1559 1560 rc = spdk_memory_domain_push_data(task->bounce.d.orig_domain, 1561 task->bounce.d.orig_domain_ctx, 1562 task->bounce.d.orig_iovs, task->bounce.d.orig_iovcnt, 1563 task->d.iovs, task->d.iovcnt, 1564 accel_task_push_data_cb, seq); 1565 if (spdk_unlikely(rc != 0)) { 1566 SPDK_ERRLOG("Failed to push data to memory domain: %s, rc: %d\n", 1567 spdk_memory_domain_get_dma_device_id(task->bounce.s.orig_domain), rc); 1568 accel_sequence_set_fail(seq, rc); 1569 } 1570 } 1571 1572 static void 1573 accel_process_sequence(struct spdk_accel_sequence *seq) 1574 { 1575 struct accel_io_channel *accel_ch = seq->ch; 1576 struct spdk_accel_task *task; 1577 enum accel_sequence_state state; 1578 int rc; 1579 1580 /* Prevent recursive calls to this function */ 1581 if (spdk_unlikely(seq->in_process_sequence)) { 1582 return; 1583 } 1584 seq->in_process_sequence = true; 1585 1586 task = TAILQ_FIRST(&seq->tasks); 1587 assert(task != NULL); 1588 1589 do { 1590 state = seq->state; 1591 switch (state) { 1592 case ACCEL_SEQUENCE_STATE_INIT: 1593 if (g_accel_driver != NULL) { 1594 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_EXEC); 1595 break; 1596 } 1597 /* Fall through */ 1598 case ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF: 1599 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF); 1600 if (!accel_sequence_check_virtbuf(seq, task)) { 1601 /* We couldn't allocate a buffer, wait until one is available */ 1602 break; 1603 } 1604 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF); 1605 /* Fall through */ 1606 case ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF: 1607 /* If a module supports memory domains, we don't need to allocate bounce 1608 * buffers */ 1609 if (g_modules_opc[task->op_code].supports_memory_domains) { 1610 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK); 1611 break; 1612 } 1613 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF); 1614 rc = accel_sequence_check_bouncebuf(seq, task); 1615 if (rc != 0) { 1616 /* We couldn't allocate a buffer, wait until one is available */ 1617 if (rc == -EAGAIN) { 1618 break; 1619 } 1620 accel_sequence_set_fail(seq, rc); 1621 break; 1622 } 1623 if (task->bounce.s.orig_iovs != NULL) { 1624 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PULL_DATA); 1625 break; 1626 } 1627 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK); 1628 /* Fall through */ 1629 case ACCEL_SEQUENCE_STATE_EXEC_TASK: 1630 SPDK_DEBUGLOG(accel, "Executing %s operation, sequence: %p\n", 1631 g_opcode_strings[task->op_code], seq); 1632 1633 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_TASK); 1634 rc = accel_submit_task(accel_ch, task); 1635 if (spdk_unlikely(rc != 0)) { 1636 SPDK_ERRLOG("Failed to submit %s operation, sequence: %p\n", 1637 g_opcode_strings[task->op_code], seq); 1638 accel_sequence_set_fail(seq, rc); 1639 } 1640 break; 1641 case ACCEL_SEQUENCE_STATE_PULL_DATA: 1642 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA); 1643 accel_task_pull_data(seq, task); 1644 break; 1645 case ACCEL_SEQUENCE_STATE_COMPLETE_TASK: 1646 if (task->bounce.d.orig_iovs != NULL) { 1647 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PUSH_DATA); 1648 break; 1649 } 1650 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK); 1651 break; 1652 case ACCEL_SEQUENCE_STATE_PUSH_DATA: 1653 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA); 1654 accel_task_push_data(seq, task); 1655 break; 1656 case ACCEL_SEQUENCE_STATE_NEXT_TASK: 1657 TAILQ_REMOVE(&seq->tasks, task, seq_link); 1658 TAILQ_INSERT_TAIL(&seq->completed, task, seq_link); 1659 /* Check if there are any remaining tasks */ 1660 task = TAILQ_FIRST(&seq->tasks); 1661 if (task == NULL) { 1662 /* Immediately return here to make sure we don't touch the sequence 1663 * after it's completed */ 1664 accel_sequence_complete(seq); 1665 return; 1666 } 1667 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_INIT); 1668 break; 1669 case ACCEL_SEQUENCE_STATE_DRIVER_EXEC: 1670 assert(!TAILQ_EMPTY(&seq->tasks)); 1671 1672 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK); 1673 rc = g_accel_driver->execute_sequence(seq); 1674 if (spdk_unlikely(rc != 0)) { 1675 SPDK_ERRLOG("Failed to execute sequence: %p using driver: %s\n", 1676 seq, g_accel_driver->name); 1677 accel_sequence_set_fail(seq, rc); 1678 } 1679 break; 1680 case ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE: 1681 task = TAILQ_FIRST(&seq->tasks); 1682 if (task == NULL) { 1683 /* Immediately return here to make sure we don't touch the sequence 1684 * after it's completed */ 1685 accel_sequence_complete(seq); 1686 return; 1687 } 1688 /* We don't want to execute the next task through the driver, so we 1689 * explicitly omit the INIT state here */ 1690 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF); 1691 break; 1692 case ACCEL_SEQUENCE_STATE_ERROR: 1693 /* Immediately return here to make sure we don't touch the sequence 1694 * after it's completed */ 1695 assert(seq->status != 0); 1696 accel_sequence_complete(seq); 1697 return; 1698 case ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF: 1699 case ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF: 1700 case ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA: 1701 case ACCEL_SEQUENCE_STATE_AWAIT_TASK: 1702 case ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA: 1703 case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK: 1704 break; 1705 default: 1706 assert(0 && "bad state"); 1707 break; 1708 } 1709 } while (seq->state != state); 1710 1711 seq->in_process_sequence = false; 1712 } 1713 1714 static void 1715 accel_sequence_task_cb(void *cb_arg, int status) 1716 { 1717 struct spdk_accel_sequence *seq = cb_arg; 1718 struct spdk_accel_task *task = TAILQ_FIRST(&seq->tasks); 1719 struct accel_io_channel *accel_ch = seq->ch; 1720 1721 /* spdk_accel_task_complete() puts the task back to the task pool, but we don't want to do 1722 * that if a task is part of a sequence. Removing the task from that pool here is the 1723 * easiest way to prevent this, even though it is a bit hacky. 1724 */ 1725 assert(task != NULL); 1726 TAILQ_REMOVE(&accel_ch->task_pool, task, link); 1727 1728 switch (seq->state) { 1729 case ACCEL_SEQUENCE_STATE_AWAIT_TASK: 1730 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_COMPLETE_TASK); 1731 if (spdk_unlikely(status != 0)) { 1732 SPDK_ERRLOG("Failed to execute %s operation, sequence: %p\n", 1733 g_opcode_strings[task->op_code], seq); 1734 accel_sequence_set_fail(seq, status); 1735 } 1736 1737 accel_process_sequence(seq); 1738 break; 1739 case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK: 1740 assert(g_accel_driver != NULL); 1741 /* Immediately remove the task from the outstanding list to make sure the next call 1742 * to spdk_accel_sequence_first_task() doesn't return it */ 1743 TAILQ_REMOVE(&seq->tasks, task, seq_link); 1744 TAILQ_INSERT_TAIL(&seq->completed, task, seq_link); 1745 1746 if (spdk_unlikely(status != 0)) { 1747 SPDK_ERRLOG("Failed to execute %s operation, sequence: %p through " 1748 "driver: %s\n", g_opcode_strings[task->op_code], seq, 1749 g_accel_driver->name); 1750 /* Update status without using accel_sequence_set_fail() to avoid changing 1751 * seq's state to ERROR until driver calls spdk_accel_sequence_continue() */ 1752 seq->status = status; 1753 } 1754 break; 1755 default: 1756 assert(0 && "bad state"); 1757 break; 1758 } 1759 } 1760 1761 void 1762 spdk_accel_sequence_continue(struct spdk_accel_sequence *seq) 1763 { 1764 assert(g_accel_driver != NULL); 1765 assert(seq->state == ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK); 1766 1767 if (spdk_likely(seq->status == 0)) { 1768 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE); 1769 } else { 1770 accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR); 1771 } 1772 1773 accel_process_sequence(seq); 1774 } 1775 1776 static bool 1777 accel_compare_iovs(struct iovec *iova, uint32_t iovacnt, struct iovec *iovb, uint32_t iovbcnt) 1778 { 1779 /* For now, just do a dumb check that the iovecs arrays are exactly the same */ 1780 if (iovacnt != iovbcnt) { 1781 return false; 1782 } 1783 1784 return memcmp(iova, iovb, sizeof(*iova) * iovacnt) == 0; 1785 } 1786 1787 static bool 1788 accel_task_set_dstbuf(struct spdk_accel_task *task, struct spdk_accel_task *next) 1789 { 1790 struct spdk_accel_task *prev; 1791 1792 switch (task->op_code) { 1793 case ACCEL_OPC_DECOMPRESS: 1794 case ACCEL_OPC_FILL: 1795 case ACCEL_OPC_ENCRYPT: 1796 case ACCEL_OPC_DECRYPT: 1797 if (task->dst_domain != next->src_domain) { 1798 return false; 1799 } 1800 if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt, 1801 next->s.iovs, next->s.iovcnt)) { 1802 return false; 1803 } 1804 task->d.iovs = next->d.iovs; 1805 task->d.iovcnt = next->d.iovcnt; 1806 task->dst_domain = next->dst_domain; 1807 task->dst_domain_ctx = next->dst_domain_ctx; 1808 break; 1809 case ACCEL_OPC_CRC32C: 1810 /* crc32 is special, because it doesn't have a dst buffer */ 1811 if (task->src_domain != next->src_domain) { 1812 return false; 1813 } 1814 if (!accel_compare_iovs(task->s.iovs, task->s.iovcnt, 1815 next->s.iovs, next->s.iovcnt)) { 1816 return false; 1817 } 1818 /* We can only change crc32's buffer if we can change previous task's buffer */ 1819 prev = TAILQ_PREV(task, accel_sequence_tasks, seq_link); 1820 if (prev == NULL) { 1821 return false; 1822 } 1823 if (!accel_task_set_dstbuf(prev, next)) { 1824 return false; 1825 } 1826 task->s.iovs = next->d.iovs; 1827 task->s.iovcnt = next->d.iovcnt; 1828 task->src_domain = next->dst_domain; 1829 task->src_domain_ctx = next->dst_domain_ctx; 1830 break; 1831 default: 1832 return false; 1833 } 1834 1835 return true; 1836 } 1837 1838 static void 1839 accel_sequence_merge_tasks(struct spdk_accel_sequence *seq, struct spdk_accel_task *task, 1840 struct spdk_accel_task **next_task) 1841 { 1842 struct spdk_accel_task *next = *next_task; 1843 1844 switch (task->op_code) { 1845 case ACCEL_OPC_COPY: 1846 /* We only allow changing src of operations that actually have a src, e.g. we never 1847 * do it for fill. Theoretically, it is possible, but we'd have to be careful to 1848 * change the src of the operation after fill (which in turn could also be a fill). 1849 * So, for the sake of simplicity, skip this type of operations for now. 1850 */ 1851 if (next->op_code != ACCEL_OPC_DECOMPRESS && 1852 next->op_code != ACCEL_OPC_COPY && 1853 next->op_code != ACCEL_OPC_ENCRYPT && 1854 next->op_code != ACCEL_OPC_DECRYPT && 1855 next->op_code != ACCEL_OPC_CRC32C) { 1856 break; 1857 } 1858 if (task->dst_domain != next->src_domain) { 1859 break; 1860 } 1861 if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt, 1862 next->s.iovs, next->s.iovcnt)) { 1863 break; 1864 } 1865 next->s.iovs = task->s.iovs; 1866 next->s.iovcnt = task->s.iovcnt; 1867 next->src_domain = task->src_domain; 1868 next->src_domain_ctx = task->src_domain_ctx; 1869 TAILQ_REMOVE(&seq->tasks, task, seq_link); 1870 TAILQ_INSERT_TAIL(&seq->completed, task, seq_link); 1871 break; 1872 case ACCEL_OPC_DECOMPRESS: 1873 case ACCEL_OPC_FILL: 1874 case ACCEL_OPC_ENCRYPT: 1875 case ACCEL_OPC_DECRYPT: 1876 case ACCEL_OPC_CRC32C: 1877 /* We can only merge tasks when one of them is a copy */ 1878 if (next->op_code != ACCEL_OPC_COPY) { 1879 break; 1880 } 1881 if (!accel_task_set_dstbuf(task, next)) { 1882 break; 1883 } 1884 /* We're removing next_task from the tasks queue, so we need to update its pointer, 1885 * so that the TAILQ_FOREACH_SAFE() loop below works correctly */ 1886 *next_task = TAILQ_NEXT(next, seq_link); 1887 TAILQ_REMOVE(&seq->tasks, next, seq_link); 1888 TAILQ_INSERT_TAIL(&seq->completed, next, seq_link); 1889 break; 1890 default: 1891 assert(0 && "bad opcode"); 1892 break; 1893 } 1894 } 1895 1896 void 1897 spdk_accel_sequence_finish(struct spdk_accel_sequence *seq, 1898 spdk_accel_completion_cb cb_fn, void *cb_arg) 1899 { 1900 struct spdk_accel_task *task, *next; 1901 1902 /* Try to remove any copy operations if possible */ 1903 TAILQ_FOREACH_SAFE(task, &seq->tasks, seq_link, next) { 1904 if (next == NULL) { 1905 break; 1906 } 1907 accel_sequence_merge_tasks(seq, task, &next); 1908 } 1909 1910 seq->cb_fn = cb_fn; 1911 seq->cb_arg = cb_arg; 1912 1913 accel_process_sequence(seq); 1914 } 1915 1916 void 1917 spdk_accel_sequence_reverse(struct spdk_accel_sequence *seq) 1918 { 1919 struct accel_sequence_tasks tasks = TAILQ_HEAD_INITIALIZER(tasks); 1920 struct spdk_accel_task *task; 1921 1922 assert(TAILQ_EMPTY(&seq->completed)); 1923 TAILQ_SWAP(&tasks, &seq->tasks, spdk_accel_task, seq_link); 1924 1925 while (!TAILQ_EMPTY(&tasks)) { 1926 task = TAILQ_FIRST(&tasks); 1927 TAILQ_REMOVE(&tasks, task, seq_link); 1928 TAILQ_INSERT_HEAD(&seq->tasks, task, seq_link); 1929 } 1930 } 1931 1932 void 1933 spdk_accel_sequence_abort(struct spdk_accel_sequence *seq) 1934 { 1935 if (seq == NULL) { 1936 return; 1937 } 1938 1939 accel_sequence_complete_tasks(seq); 1940 accel_sequence_put(seq); 1941 } 1942 1943 struct spdk_memory_domain * 1944 spdk_accel_get_memory_domain(void) 1945 { 1946 return g_accel_domain; 1947 } 1948 1949 static struct spdk_accel_module_if * 1950 _module_find_by_name(const char *name) 1951 { 1952 struct spdk_accel_module_if *accel_module = NULL; 1953 1954 TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) { 1955 if (strcmp(name, accel_module->name) == 0) { 1956 break; 1957 } 1958 } 1959 1960 return accel_module; 1961 } 1962 1963 static inline struct spdk_accel_crypto_key * 1964 _accel_crypto_key_get(const char *name) 1965 { 1966 struct spdk_accel_crypto_key *key; 1967 1968 assert(spdk_spin_held(&g_keyring_spin)); 1969 1970 TAILQ_FOREACH(key, &g_keyring, link) { 1971 if (strcmp(name, key->param.key_name) == 0) { 1972 return key; 1973 } 1974 } 1975 1976 return NULL; 1977 } 1978 1979 static void 1980 accel_crypto_key_free_mem(struct spdk_accel_crypto_key *key) 1981 { 1982 if (key->param.hex_key) { 1983 spdk_memset_s(key->param.hex_key, key->key_size * 2, 0, key->key_size * 2); 1984 free(key->param.hex_key); 1985 } 1986 if (key->param.hex_key2) { 1987 spdk_memset_s(key->param.hex_key2, key->key2_size * 2, 0, key->key2_size * 2); 1988 free(key->param.hex_key2); 1989 } 1990 free(key->param.key_name); 1991 free(key->param.cipher); 1992 if (key->key) { 1993 spdk_memset_s(key->key, key->key_size, 0, key->key_size); 1994 free(key->key); 1995 } 1996 if (key->key2) { 1997 spdk_memset_s(key->key2, key->key2_size, 0, key->key2_size); 1998 free(key->key2); 1999 } 2000 free(key); 2001 } 2002 2003 static void 2004 accel_crypto_key_destroy_unsafe(struct spdk_accel_crypto_key *key) 2005 { 2006 assert(key->module_if); 2007 assert(key->module_if->crypto_key_deinit); 2008 2009 key->module_if->crypto_key_deinit(key); 2010 accel_crypto_key_free_mem(key); 2011 } 2012 2013 /* 2014 * This function mitigates a timing side channel which could be caused by using strcmp() 2015 * Please refer to chapter "Mitigating Information Leakage Based on Variable Timing" in 2016 * the article [1] for more details 2017 * [1] https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/secure-coding/mitigate-timing-side-channel-crypto-implementation.html 2018 */ 2019 static bool 2020 accel_aes_xts_keys_equal(const char *k1, size_t k1_len, const char *k2, size_t k2_len) 2021 { 2022 size_t i; 2023 volatile size_t x = k1_len ^ k2_len; 2024 2025 for (i = 0; ((i < k1_len) & (i < k2_len)); i++) { 2026 x |= k1[i] ^ k2[i]; 2027 } 2028 2029 return x == 0; 2030 } 2031 2032 int 2033 spdk_accel_crypto_key_create(const struct spdk_accel_crypto_key_create_param *param) 2034 { 2035 struct spdk_accel_module_if *module; 2036 struct spdk_accel_crypto_key *key; 2037 size_t hex_key_size, hex_key2_size; 2038 int rc; 2039 2040 if (!param || !param->hex_key || !param->cipher || !param->key_name) { 2041 return -EINVAL; 2042 } 2043 2044 if (g_modules_opc[ACCEL_OPC_ENCRYPT].module != g_modules_opc[ACCEL_OPC_DECRYPT].module) { 2045 /* hardly ever possible, but let's check and warn the user */ 2046 SPDK_ERRLOG("Different accel modules are used for encryption and decryption\n"); 2047 } 2048 module = g_modules_opc[ACCEL_OPC_ENCRYPT].module; 2049 2050 if (!module) { 2051 SPDK_ERRLOG("No accel module found assigned for crypto operation\n"); 2052 return -ENOENT; 2053 } 2054 if (!module->crypto_key_init) { 2055 SPDK_ERRLOG("Accel module \"%s\" doesn't support crypto operations\n", module->name); 2056 return -ENOTSUP; 2057 } 2058 2059 key = calloc(1, sizeof(*key)); 2060 if (!key) { 2061 return -ENOMEM; 2062 } 2063 2064 key->param.key_name = strdup(param->key_name); 2065 if (!key->param.key_name) { 2066 rc = -ENOMEM; 2067 goto error; 2068 } 2069 2070 key->param.cipher = strdup(param->cipher); 2071 if (!key->param.cipher) { 2072 rc = -ENOMEM; 2073 goto error; 2074 } 2075 2076 hex_key_size = strnlen(param->hex_key, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH); 2077 if (hex_key_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) { 2078 SPDK_ERRLOG("key1 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH); 2079 rc = -EINVAL; 2080 goto error; 2081 } 2082 key->param.hex_key = strdup(param->hex_key); 2083 if (!key->param.hex_key) { 2084 rc = -ENOMEM; 2085 goto error; 2086 } 2087 2088 key->key_size = hex_key_size / 2; 2089 key->key = spdk_unhexlify(key->param.hex_key); 2090 if (!key->key) { 2091 SPDK_ERRLOG("Failed to unhexlify key1\n"); 2092 rc = -EINVAL; 2093 goto error; 2094 } 2095 2096 if (param->hex_key2) { 2097 hex_key2_size = strnlen(param->hex_key2, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH); 2098 if (hex_key2_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) { 2099 SPDK_ERRLOG("key2 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH); 2100 rc = -EINVAL; 2101 goto error; 2102 } 2103 key->param.hex_key2 = strdup(param->hex_key2); 2104 if (!key->param.hex_key2) { 2105 rc = -ENOMEM; 2106 goto error; 2107 } 2108 2109 key->key2_size = hex_key2_size / 2; 2110 key->key2 = spdk_unhexlify(key->param.hex_key2); 2111 if (!key->key2) { 2112 SPDK_ERRLOG("Failed to unhexlify key2\n"); 2113 rc = -EINVAL; 2114 goto error; 2115 } 2116 2117 if (accel_aes_xts_keys_equal(key->key, key->key_size, key->key2, key->key2_size)) { 2118 SPDK_ERRLOG("Identical keys are not secure\n"); 2119 rc = -EINVAL; 2120 goto error; 2121 } 2122 } 2123 2124 key->module_if = module; 2125 2126 spdk_spin_lock(&g_keyring_spin); 2127 if (_accel_crypto_key_get(param->key_name)) { 2128 rc = -EEXIST; 2129 } else { 2130 rc = module->crypto_key_init(key); 2131 if (!rc) { 2132 TAILQ_INSERT_TAIL(&g_keyring, key, link); 2133 } 2134 } 2135 spdk_spin_unlock(&g_keyring_spin); 2136 2137 if (rc) { 2138 goto error; 2139 } 2140 2141 return 0; 2142 2143 error: 2144 accel_crypto_key_free_mem(key); 2145 return rc; 2146 } 2147 2148 int 2149 spdk_accel_crypto_key_destroy(struct spdk_accel_crypto_key *key) 2150 { 2151 if (!key || !key->module_if) { 2152 return -EINVAL; 2153 } 2154 2155 spdk_spin_lock(&g_keyring_spin); 2156 if (!_accel_crypto_key_get(key->param.key_name)) { 2157 spdk_spin_unlock(&g_keyring_spin); 2158 return -ENOENT; 2159 } 2160 TAILQ_REMOVE(&g_keyring, key, link); 2161 spdk_spin_unlock(&g_keyring_spin); 2162 2163 accel_crypto_key_destroy_unsafe(key); 2164 2165 return 0; 2166 } 2167 2168 struct spdk_accel_crypto_key * 2169 spdk_accel_crypto_key_get(const char *name) 2170 { 2171 struct spdk_accel_crypto_key *key; 2172 2173 spdk_spin_lock(&g_keyring_spin); 2174 key = _accel_crypto_key_get(name); 2175 spdk_spin_unlock(&g_keyring_spin); 2176 2177 return key; 2178 } 2179 2180 /* Helper function when accel modules register with the framework. */ 2181 void 2182 spdk_accel_module_list_add(struct spdk_accel_module_if *accel_module) 2183 { 2184 if (_module_find_by_name(accel_module->name)) { 2185 SPDK_NOTICELOG("Accel module %s already registered\n", accel_module->name); 2186 assert(false); 2187 return; 2188 } 2189 2190 /* Make sure that the software module is at the head of the list, this 2191 * will assure that all opcodes are later assigned to software first and 2192 * then updated to HW modules as they are registered. 2193 */ 2194 if (strcmp(accel_module->name, "software") == 0) { 2195 TAILQ_INSERT_HEAD(&spdk_accel_module_list, accel_module, tailq); 2196 } else { 2197 TAILQ_INSERT_TAIL(&spdk_accel_module_list, accel_module, tailq); 2198 } 2199 2200 if (accel_module->get_ctx_size && accel_module->get_ctx_size() > g_max_accel_module_size) { 2201 g_max_accel_module_size = accel_module->get_ctx_size(); 2202 } 2203 } 2204 2205 /* Framework level channel create callback. */ 2206 static int 2207 accel_create_channel(void *io_device, void *ctx_buf) 2208 { 2209 struct accel_io_channel *accel_ch = ctx_buf; 2210 struct spdk_accel_task *accel_task; 2211 struct spdk_accel_sequence *seq; 2212 struct accel_buffer *buf; 2213 uint8_t *task_mem; 2214 uint32_t i = 0, j; 2215 int rc; 2216 2217 accel_ch->task_pool_base = calloc(g_opts.task_count, g_max_accel_module_size); 2218 if (accel_ch->task_pool_base == NULL) { 2219 return -ENOMEM; 2220 } 2221 2222 accel_ch->seq_pool_base = calloc(g_opts.sequence_count, sizeof(struct spdk_accel_sequence)); 2223 if (accel_ch->seq_pool_base == NULL) { 2224 goto err; 2225 } 2226 2227 accel_ch->buf_pool_base = calloc(g_opts.buf_count, sizeof(struct accel_buffer)); 2228 if (accel_ch->buf_pool_base == NULL) { 2229 goto err; 2230 } 2231 2232 TAILQ_INIT(&accel_ch->task_pool); 2233 TAILQ_INIT(&accel_ch->seq_pool); 2234 TAILQ_INIT(&accel_ch->buf_pool); 2235 2236 task_mem = accel_ch->task_pool_base; 2237 for (i = 0; i < g_opts.task_count; i++) { 2238 accel_task = (struct spdk_accel_task *)task_mem; 2239 TAILQ_INSERT_TAIL(&accel_ch->task_pool, accel_task, link); 2240 task_mem += g_max_accel_module_size; 2241 } 2242 for (i = 0; i < g_opts.sequence_count; i++) { 2243 seq = &accel_ch->seq_pool_base[i]; 2244 TAILQ_INSERT_TAIL(&accel_ch->seq_pool, seq, link); 2245 } 2246 for (i = 0; i < g_opts.buf_count; i++) { 2247 buf = &accel_ch->buf_pool_base[i]; 2248 TAILQ_INSERT_TAIL(&accel_ch->buf_pool, buf, link); 2249 } 2250 2251 /* Assign modules and get IO channels for each */ 2252 for (i = 0; i < ACCEL_OPC_LAST; i++) { 2253 accel_ch->module_ch[i] = g_modules_opc[i].module->get_io_channel(); 2254 /* This can happen if idxd runs out of channels. */ 2255 if (accel_ch->module_ch[i] == NULL) { 2256 goto err; 2257 } 2258 } 2259 2260 rc = spdk_iobuf_channel_init(&accel_ch->iobuf, "accel", g_opts.small_cache_size, 2261 g_opts.large_cache_size); 2262 if (rc != 0) { 2263 SPDK_ERRLOG("Failed to initialize iobuf accel channel\n"); 2264 goto err; 2265 } 2266 2267 return 0; 2268 err: 2269 for (j = 0; j < i; j++) { 2270 spdk_put_io_channel(accel_ch->module_ch[j]); 2271 } 2272 free(accel_ch->task_pool_base); 2273 free(accel_ch->seq_pool_base); 2274 free(accel_ch->buf_pool_base); 2275 2276 return -ENOMEM; 2277 } 2278 2279 static void 2280 accel_add_stats(struct accel_stats *total, struct accel_stats *stats) 2281 { 2282 int i; 2283 2284 total->sequence_executed += stats->sequence_executed; 2285 total->sequence_failed += stats->sequence_failed; 2286 for (i = 0; i < ACCEL_OPC_LAST; ++i) { 2287 total->operations[i].executed += stats->operations[i].executed; 2288 total->operations[i].failed += stats->operations[i].failed; 2289 total->operations[i].num_bytes += stats->operations[i].num_bytes; 2290 } 2291 } 2292 2293 /* Framework level channel destroy callback. */ 2294 static void 2295 accel_destroy_channel(void *io_device, void *ctx_buf) 2296 { 2297 struct accel_io_channel *accel_ch = ctx_buf; 2298 int i; 2299 2300 spdk_iobuf_channel_fini(&accel_ch->iobuf); 2301 2302 for (i = 0; i < ACCEL_OPC_LAST; i++) { 2303 assert(accel_ch->module_ch[i] != NULL); 2304 spdk_put_io_channel(accel_ch->module_ch[i]); 2305 accel_ch->module_ch[i] = NULL; 2306 } 2307 2308 /* Update global stats to make sure channel's stats aren't lost after a channel is gone */ 2309 spdk_spin_lock(&g_stats_lock); 2310 accel_add_stats(&g_stats, &accel_ch->stats); 2311 spdk_spin_unlock(&g_stats_lock); 2312 2313 free(accel_ch->task_pool_base); 2314 free(accel_ch->seq_pool_base); 2315 free(accel_ch->buf_pool_base); 2316 } 2317 2318 struct spdk_io_channel * 2319 spdk_accel_get_io_channel(void) 2320 { 2321 return spdk_get_io_channel(&spdk_accel_module_list); 2322 } 2323 2324 static void 2325 accel_module_initialize(void) 2326 { 2327 struct spdk_accel_module_if *accel_module; 2328 2329 TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) { 2330 accel_module->module_init(); 2331 } 2332 } 2333 2334 static void 2335 accel_module_init_opcode(enum accel_opcode opcode) 2336 { 2337 struct accel_module *module = &g_modules_opc[opcode]; 2338 struct spdk_accel_module_if *module_if = module->module; 2339 2340 if (module_if->get_memory_domains != NULL) { 2341 module->supports_memory_domains = module_if->get_memory_domains(NULL, 0) > 0; 2342 } 2343 } 2344 2345 int 2346 spdk_accel_initialize(void) 2347 { 2348 enum accel_opcode op; 2349 struct spdk_accel_module_if *accel_module = NULL; 2350 int rc; 2351 2352 rc = spdk_memory_domain_create(&g_accel_domain, SPDK_DMA_DEVICE_TYPE_ACCEL, NULL, 2353 "SPDK_ACCEL_DMA_DEVICE"); 2354 if (rc != 0) { 2355 SPDK_ERRLOG("Failed to create accel memory domain\n"); 2356 return rc; 2357 } 2358 2359 spdk_spin_init(&g_keyring_spin); 2360 spdk_spin_init(&g_stats_lock); 2361 2362 g_modules_started = true; 2363 accel_module_initialize(); 2364 2365 /* Create our priority global map of opcodes to modules, we populate starting 2366 * with the software module (guaranteed to be first on the list) and then 2367 * updating opcodes with HW modules that have been initialized. 2368 * NOTE: all opcodes must be supported by software in the event that no HW 2369 * modules are initialized to support the operation. 2370 */ 2371 TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) { 2372 for (op = 0; op < ACCEL_OPC_LAST; op++) { 2373 if (accel_module->supports_opcode(op)) { 2374 g_modules_opc[op].module = accel_module; 2375 SPDK_DEBUGLOG(accel, "OPC 0x%x now assigned to %s\n", op, accel_module->name); 2376 } 2377 } 2378 } 2379 2380 /* Now lets check for overrides and apply all that exist */ 2381 for (op = 0; op < ACCEL_OPC_LAST; op++) { 2382 if (g_modules_opc_override[op] != NULL) { 2383 accel_module = _module_find_by_name(g_modules_opc_override[op]); 2384 if (accel_module == NULL) { 2385 SPDK_ERRLOG("Invalid module name of %s\n", g_modules_opc_override[op]); 2386 rc = -EINVAL; 2387 goto error; 2388 } 2389 if (accel_module->supports_opcode(op) == false) { 2390 SPDK_ERRLOG("Module %s does not support op code %d\n", accel_module->name, op); 2391 rc = -EINVAL; 2392 goto error; 2393 } 2394 g_modules_opc[op].module = accel_module; 2395 } 2396 } 2397 2398 if (g_modules_opc[ACCEL_OPC_ENCRYPT].module != g_modules_opc[ACCEL_OPC_DECRYPT].module) { 2399 SPDK_ERRLOG("Different accel modules are assigned to encrypt and decrypt operations"); 2400 rc = -EINVAL; 2401 goto error; 2402 } 2403 2404 for (op = 0; op < ACCEL_OPC_LAST; op++) { 2405 assert(g_modules_opc[op].module != NULL); 2406 accel_module_init_opcode(op); 2407 } 2408 2409 rc = spdk_iobuf_register_module("accel"); 2410 if (rc != 0) { 2411 SPDK_ERRLOG("Failed to register accel iobuf module\n"); 2412 goto error; 2413 } 2414 2415 /* 2416 * We need a unique identifier for the accel framework, so use the 2417 * spdk_accel_module_list address for this purpose. 2418 */ 2419 spdk_io_device_register(&spdk_accel_module_list, accel_create_channel, accel_destroy_channel, 2420 sizeof(struct accel_io_channel), "accel"); 2421 2422 return 0; 2423 error: 2424 spdk_memory_domain_destroy(g_accel_domain); 2425 2426 return rc; 2427 } 2428 2429 static void 2430 accel_module_finish_cb(void) 2431 { 2432 spdk_accel_fini_cb cb_fn = g_fini_cb_fn; 2433 2434 spdk_memory_domain_destroy(g_accel_domain); 2435 2436 cb_fn(g_fini_cb_arg); 2437 g_fini_cb_fn = NULL; 2438 g_fini_cb_arg = NULL; 2439 } 2440 2441 static void 2442 accel_write_overridden_opc(struct spdk_json_write_ctx *w, const char *opc_str, 2443 const char *module_str) 2444 { 2445 spdk_json_write_object_begin(w); 2446 spdk_json_write_named_string(w, "method", "accel_assign_opc"); 2447 spdk_json_write_named_object_begin(w, "params"); 2448 spdk_json_write_named_string(w, "opname", opc_str); 2449 spdk_json_write_named_string(w, "module", module_str); 2450 spdk_json_write_object_end(w); 2451 spdk_json_write_object_end(w); 2452 } 2453 2454 static void 2455 __accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key) 2456 { 2457 spdk_json_write_named_string(w, "name", key->param.key_name); 2458 spdk_json_write_named_string(w, "cipher", key->param.cipher); 2459 spdk_json_write_named_string(w, "key", key->param.hex_key); 2460 if (key->param.hex_key2) { 2461 spdk_json_write_named_string(w, "key2", key->param.hex_key2); 2462 } 2463 } 2464 2465 void 2466 _accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key) 2467 { 2468 spdk_json_write_object_begin(w); 2469 __accel_crypto_key_dump_param(w, key); 2470 spdk_json_write_object_end(w); 2471 } 2472 2473 static void 2474 _accel_crypto_key_write_config_json(struct spdk_json_write_ctx *w, 2475 struct spdk_accel_crypto_key *key) 2476 { 2477 spdk_json_write_object_begin(w); 2478 spdk_json_write_named_string(w, "method", "accel_crypto_key_create"); 2479 spdk_json_write_named_object_begin(w, "params"); 2480 __accel_crypto_key_dump_param(w, key); 2481 spdk_json_write_object_end(w); 2482 spdk_json_write_object_end(w); 2483 } 2484 2485 static void 2486 accel_write_options(struct spdk_json_write_ctx *w) 2487 { 2488 spdk_json_write_object_begin(w); 2489 spdk_json_write_named_string(w, "method", "accel_set_options"); 2490 spdk_json_write_named_object_begin(w, "params"); 2491 spdk_json_write_named_uint32(w, "small_cache_size", g_opts.small_cache_size); 2492 spdk_json_write_named_uint32(w, "large_cache_size", g_opts.large_cache_size); 2493 spdk_json_write_named_uint32(w, "task_count", g_opts.task_count); 2494 spdk_json_write_named_uint32(w, "sequence_count", g_opts.sequence_count); 2495 spdk_json_write_named_uint32(w, "buf_count", g_opts.buf_count); 2496 spdk_json_write_object_end(w); 2497 spdk_json_write_object_end(w); 2498 } 2499 2500 static void 2501 _accel_crypto_keys_write_config_json(struct spdk_json_write_ctx *w, bool full_dump) 2502 { 2503 struct spdk_accel_crypto_key *key; 2504 2505 spdk_spin_lock(&g_keyring_spin); 2506 TAILQ_FOREACH(key, &g_keyring, link) { 2507 if (full_dump) { 2508 _accel_crypto_key_write_config_json(w, key); 2509 } else { 2510 _accel_crypto_key_dump_param(w, key); 2511 } 2512 } 2513 spdk_spin_unlock(&g_keyring_spin); 2514 } 2515 2516 void 2517 _accel_crypto_keys_dump_param(struct spdk_json_write_ctx *w) 2518 { 2519 _accel_crypto_keys_write_config_json(w, false); 2520 } 2521 2522 void 2523 spdk_accel_write_config_json(struct spdk_json_write_ctx *w) 2524 { 2525 struct spdk_accel_module_if *accel_module; 2526 int i; 2527 2528 spdk_json_write_array_begin(w); 2529 accel_write_options(w); 2530 2531 TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) { 2532 if (accel_module->write_config_json) { 2533 accel_module->write_config_json(w); 2534 } 2535 } 2536 for (i = 0; i < ACCEL_OPC_LAST; i++) { 2537 if (g_modules_opc_override[i]) { 2538 accel_write_overridden_opc(w, g_opcode_strings[i], g_modules_opc_override[i]); 2539 } 2540 } 2541 2542 _accel_crypto_keys_write_config_json(w, true); 2543 2544 spdk_json_write_array_end(w); 2545 } 2546 2547 void 2548 spdk_accel_module_finish(void) 2549 { 2550 if (!g_accel_module) { 2551 g_accel_module = TAILQ_FIRST(&spdk_accel_module_list); 2552 } else { 2553 g_accel_module = TAILQ_NEXT(g_accel_module, tailq); 2554 } 2555 2556 if (!g_accel_module) { 2557 spdk_spin_destroy(&g_keyring_spin); 2558 spdk_spin_destroy(&g_stats_lock); 2559 accel_module_finish_cb(); 2560 return; 2561 } 2562 2563 if (g_accel_module->module_fini) { 2564 spdk_thread_send_msg(spdk_get_thread(), g_accel_module->module_fini, NULL); 2565 } else { 2566 spdk_accel_module_finish(); 2567 } 2568 } 2569 2570 static void 2571 accel_io_device_unregister_cb(void *io_device) 2572 { 2573 struct spdk_accel_crypto_key *key, *key_tmp; 2574 enum accel_opcode op; 2575 2576 spdk_spin_lock(&g_keyring_spin); 2577 TAILQ_FOREACH_SAFE(key, &g_keyring, link, key_tmp) { 2578 accel_crypto_key_destroy_unsafe(key); 2579 } 2580 spdk_spin_unlock(&g_keyring_spin); 2581 2582 for (op = 0; op < ACCEL_OPC_LAST; op++) { 2583 if (g_modules_opc_override[op] != NULL) { 2584 free(g_modules_opc_override[op]); 2585 g_modules_opc_override[op] = NULL; 2586 } 2587 g_modules_opc[op].module = NULL; 2588 } 2589 2590 spdk_accel_module_finish(); 2591 } 2592 2593 void 2594 spdk_accel_finish(spdk_accel_fini_cb cb_fn, void *cb_arg) 2595 { 2596 assert(cb_fn != NULL); 2597 2598 g_fini_cb_fn = cb_fn; 2599 g_fini_cb_arg = cb_arg; 2600 2601 spdk_io_device_unregister(&spdk_accel_module_list, accel_io_device_unregister_cb); 2602 } 2603 2604 static struct spdk_accel_driver * 2605 accel_find_driver(const char *name) 2606 { 2607 struct spdk_accel_driver *driver; 2608 2609 TAILQ_FOREACH(driver, &g_accel_drivers, tailq) { 2610 if (strcmp(driver->name, name) == 0) { 2611 return driver; 2612 } 2613 } 2614 2615 return NULL; 2616 } 2617 2618 int 2619 spdk_accel_set_driver(const char *name) 2620 { 2621 struct spdk_accel_driver *driver; 2622 2623 driver = accel_find_driver(name); 2624 if (driver == NULL) { 2625 SPDK_ERRLOG("Couldn't find driver named '%s'\n", name); 2626 return -ENODEV; 2627 } 2628 2629 g_accel_driver = driver; 2630 2631 return 0; 2632 } 2633 2634 void 2635 spdk_accel_driver_register(struct spdk_accel_driver *driver) 2636 { 2637 if (accel_find_driver(driver->name)) { 2638 SPDK_ERRLOG("Driver named '%s' has already been registered\n", driver->name); 2639 assert(0); 2640 return; 2641 } 2642 2643 TAILQ_INSERT_TAIL(&g_accel_drivers, driver, tailq); 2644 } 2645 2646 int 2647 spdk_accel_set_opts(const struct spdk_accel_opts *opts) 2648 { 2649 if (opts->size > sizeof(*opts)) { 2650 return -EINVAL; 2651 } 2652 2653 memcpy(&g_opts, opts, opts->size); 2654 2655 return 0; 2656 } 2657 2658 void 2659 spdk_accel_get_opts(struct spdk_accel_opts *opts) 2660 { 2661 size_t size = opts->size; 2662 2663 assert(size <= sizeof(*opts)); 2664 2665 memcpy(opts, &g_opts, spdk_min(sizeof(*opts), size)); 2666 opts->size = size; 2667 } 2668 2669 struct accel_get_stats_ctx { 2670 struct accel_stats stats; 2671 accel_get_stats_cb cb_fn; 2672 void *cb_arg; 2673 }; 2674 2675 static void 2676 accel_get_channel_stats_done(struct spdk_io_channel_iter *iter, int status) 2677 { 2678 struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter); 2679 2680 ctx->cb_fn(&ctx->stats, ctx->cb_arg); 2681 free(ctx); 2682 } 2683 2684 static void 2685 accel_get_channel_stats(struct spdk_io_channel_iter *iter) 2686 { 2687 struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(iter); 2688 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 2689 struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter); 2690 2691 accel_add_stats(&ctx->stats, &accel_ch->stats); 2692 spdk_for_each_channel_continue(iter, 0); 2693 } 2694 2695 int 2696 accel_get_stats(accel_get_stats_cb cb_fn, void *cb_arg) 2697 { 2698 struct accel_get_stats_ctx *ctx; 2699 2700 ctx = calloc(1, sizeof(*ctx)); 2701 if (ctx == NULL) { 2702 return -ENOMEM; 2703 } 2704 2705 spdk_spin_lock(&g_stats_lock); 2706 accel_add_stats(&ctx->stats, &g_stats); 2707 spdk_spin_unlock(&g_stats_lock); 2708 2709 ctx->cb_fn = cb_fn; 2710 ctx->cb_arg = cb_arg; 2711 2712 spdk_for_each_channel(&spdk_accel_module_list, accel_get_channel_stats, ctx, 2713 accel_get_channel_stats_done); 2714 2715 return 0; 2716 } 2717 2718 void 2719 spdk_accel_get_opcode_stats(struct spdk_io_channel *ch, enum accel_opcode opcode, 2720 struct spdk_accel_opcode_stats *stats, size_t size) 2721 { 2722 struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch); 2723 2724 #define FIELD_OK(field) \ 2725 offsetof(struct spdk_accel_opcode_stats, field) + sizeof(stats->field) <= size 2726 2727 #define SET_FIELD(field, value) \ 2728 if (FIELD_OK(field)) { \ 2729 stats->field = value; \ 2730 } 2731 2732 SET_FIELD(executed, accel_ch->stats.operations[opcode].executed); 2733 SET_FIELD(failed, accel_ch->stats.operations[opcode].failed); 2734 SET_FIELD(num_bytes, accel_ch->stats.operations[opcode].num_bytes); 2735 2736 #undef FIELD_OK 2737 #undef SET_FIELD 2738 } 2739 2740 SPDK_LOG_REGISTER_COMPONENT(accel) 2741