/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Intel Corporation.
 * Copyright (c) 2022, 2023 NVIDIA CORPORATION & AFFILIATES.
 * All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/accel_module.h"

#include "accel_internal.h"

#include "spdk/dma.h"
#include "spdk/env.h"
#include "spdk/likely.h"
#include "spdk/log.h"
#include "spdk/thread.h"
#include "spdk/json.h"
#include "spdk/crc32.h"
#include "spdk/util.h"
#include "spdk/hexlify.h"
#include "spdk/string.h"

/* Accelerator Framework: The following provides a top-level, generic API for
 * the accelerator functions defined here. Modules, such as the one in
 * /module/accel/ioat, supply the implementations, with the exception of the
 * pure software implementation contained later in this file.
 */

#define ALIGN_4K 0x1000
#define ACCEL_TASKS_PER_CHANNEL 2048
#define ACCEL_SMALL_CACHE_SIZE 128
#define ACCEL_LARGE_CACHE_SIZE 16
/* Set MSB, so we don't return NULL pointers as buffers */
#define ACCEL_BUFFER_BASE ((void *)(1ull << 63))
#define ACCEL_BUFFER_OFFSET_MASK ((uintptr_t)ACCEL_BUFFER_BASE - 1)

#define ACCEL_CRYPTO_TWEAK_MODE_DEFAULT SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA
#define ACCEL_TASKS_IN_SEQUENCE_LIMIT 8

struct accel_module {
        struct spdk_accel_module_if *module;
        bool supports_memory_domains;
};

/* Largest context size for all accel modules */
static size_t g_max_accel_module_size = sizeof(struct spdk_accel_task);

static struct spdk_accel_module_if *g_accel_module = NULL;
static spdk_accel_fini_cb g_fini_cb_fn = NULL;
static void *g_fini_cb_arg = NULL;
static bool g_modules_started = false;
static struct spdk_memory_domain *g_accel_domain;

/* Global list of registered accelerator modules */
static TAILQ_HEAD(, spdk_accel_module_if) spdk_accel_module_list =
        TAILQ_HEAD_INITIALIZER(spdk_accel_module_list);

/* Crypto keyring */
static TAILQ_HEAD(, spdk_accel_crypto_key) g_keyring = TAILQ_HEAD_INITIALIZER(g_keyring);
static struct spdk_spinlock g_keyring_spin;

/* Global array mapping capabilities to modules */
static struct accel_module g_modules_opc[SPDK_ACCEL_OPC_LAST] = {};
static char *g_modules_opc_override[SPDK_ACCEL_OPC_LAST] = {};
TAILQ_HEAD(, spdk_accel_driver) g_accel_drivers = TAILQ_HEAD_INITIALIZER(g_accel_drivers);
static struct spdk_accel_driver *g_accel_driver;
static struct spdk_accel_opts g_opts = {
        .small_cache_size = ACCEL_SMALL_CACHE_SIZE,
        .large_cache_size = ACCEL_LARGE_CACHE_SIZE,
        .task_count = ACCEL_TASKS_PER_CHANNEL,
        .sequence_count = ACCEL_TASKS_PER_CHANNEL,
        .buf_count = ACCEL_TASKS_PER_CHANNEL,
};
static struct accel_stats g_stats;
static struct spdk_spinlock g_stats_lock;

static const char *g_opcode_strings[SPDK_ACCEL_OPC_LAST] = {
        "copy", "fill", "dualcast", "compare", "crc32c", "copy_crc32c",
        "compress", "decompress", "encrypt", "decrypt", "xor",
        "dif_verify", "dif_verify_copy", "dif_generate", "dif_generate_copy",
        "dix_generate", "dix_verify"
};
enum accel_sequence_state {
        ACCEL_SEQUENCE_STATE_INIT,
        ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF,
        ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF,
        ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF,
        ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF,
        ACCEL_SEQUENCE_STATE_PULL_DATA,
        ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA,
        ACCEL_SEQUENCE_STATE_EXEC_TASK,
        ACCEL_SEQUENCE_STATE_AWAIT_TASK,
        ACCEL_SEQUENCE_STATE_COMPLETE_TASK,
        ACCEL_SEQUENCE_STATE_NEXT_TASK,
        ACCEL_SEQUENCE_STATE_PUSH_DATA,
        ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA,
        ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS,
        ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS,
        ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS,
        ACCEL_SEQUENCE_STATE_ERROR,
        ACCEL_SEQUENCE_STATE_MAX,
};

static const char *g_seq_states[]
__attribute__((unused)) = {
        [ACCEL_SEQUENCE_STATE_INIT] = "init",
        [ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF] = "check-virtbuf",
        [ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF] = "await-virtbuf",
        [ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF] = "check-bouncebuf",
        [ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF] = "await-bouncebuf",
        [ACCEL_SEQUENCE_STATE_PULL_DATA] = "pull-data",
        [ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA] = "await-pull-data",
        [ACCEL_SEQUENCE_STATE_EXEC_TASK] = "exec-task",
        [ACCEL_SEQUENCE_STATE_AWAIT_TASK] = "await-task",
        [ACCEL_SEQUENCE_STATE_COMPLETE_TASK] = "complete-task",
        [ACCEL_SEQUENCE_STATE_NEXT_TASK] = "next-task",
        [ACCEL_SEQUENCE_STATE_PUSH_DATA] = "push-data",
        [ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA] = "await-push-data",
        [ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS] = "driver-exec-tasks",
        [ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS] = "driver-await-tasks",
        [ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS] = "driver-complete-tasks",
        [ACCEL_SEQUENCE_STATE_ERROR] = "error",
        [ACCEL_SEQUENCE_STATE_MAX] = "",
};

#define ACCEL_SEQUENCE_STATE_STRING(s) \
        (((s) >= ACCEL_SEQUENCE_STATE_INIT && (s) < ACCEL_SEQUENCE_STATE_MAX) \
         ? g_seq_states[s] : "unknown")

struct accel_buffer {
        struct spdk_accel_sequence *seq;
        void *buf;
        uint64_t len;
        struct spdk_iobuf_entry iobuf;
        spdk_accel_sequence_get_buf_cb cb_fn;
        void *cb_ctx;
        SLIST_ENTRY(accel_buffer) link;
        struct accel_io_channel *ch;
};

struct accel_io_channel {
        struct spdk_io_channel *module_ch[SPDK_ACCEL_OPC_LAST];
        struct spdk_io_channel *driver_channel;
        void *task_pool_base;
        struct spdk_accel_sequence *seq_pool_base;
        struct accel_buffer *buf_pool_base;
        struct spdk_accel_task_aux_data *task_aux_data_base;
        STAILQ_HEAD(, spdk_accel_task) task_pool;
        SLIST_HEAD(, spdk_accel_task_aux_data) task_aux_data_pool;
        SLIST_HEAD(, spdk_accel_sequence) seq_pool;
        SLIST_HEAD(, accel_buffer) buf_pool;
        struct spdk_iobuf_channel iobuf;
        struct accel_stats stats;
};

TAILQ_HEAD(accel_sequence_tasks, spdk_accel_task);

struct spdk_accel_sequence {
        struct accel_io_channel *ch;
        struct accel_sequence_tasks tasks;
        SLIST_HEAD(, accel_buffer) bounce_bufs;
        int status;
        /* state uses enum accel_sequence_state */
        uint8_t state;
        bool in_process_sequence;
        spdk_accel_completion_cb cb_fn;
        void *cb_arg;
        SLIST_ENTRY(spdk_accel_sequence) link;
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_accel_sequence) == 64, "invalid size");

#define accel_update_stats(ch, event, v) \
        do { \
                (ch)->stats.event += (v); \
        } while (0)

#define accel_update_task_stats(ch, task, event, v) \
        accel_update_stats(ch, operations[(task)->op_code].event, v)

static inline void accel_sequence_task_cb(struct spdk_accel_sequence *seq,
                struct spdk_accel_task *task, int status);
static inline void
accel_sequence_set_state(struct spdk_accel_sequence *seq, enum accel_sequence_state state)
{
        SPDK_DEBUGLOG(accel, "seq=%p, setting state: %s -> %s\n", seq,
                      ACCEL_SEQUENCE_STATE_STRING(seq->state), ACCEL_SEQUENCE_STATE_STRING(state));
        assert(seq->state != ACCEL_SEQUENCE_STATE_ERROR || state == ACCEL_SEQUENCE_STATE_ERROR);
        seq->state = state;
}

static void
accel_sequence_set_fail(struct spdk_accel_sequence *seq, int status)
{
        accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
        assert(status != 0);
        seq->status = status;
}

int
spdk_accel_get_opc_module_name(enum spdk_accel_opcode opcode, const char **module_name)
{
        if (opcode >= SPDK_ACCEL_OPC_LAST) {
                /* invalid opcode */
                return -EINVAL;
        }

        if (g_modules_opc[opcode].module) {
                *module_name = g_modules_opc[opcode].module->name;
        } else {
                return -ENOENT;
        }

        return 0;
}

void
_accel_for_each_module(struct module_info *info, _accel_for_each_module_fn fn)
{
        struct spdk_accel_module_if *accel_module;
        enum spdk_accel_opcode opcode;
        int j = 0;

        TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
                for (opcode = 0; opcode < SPDK_ACCEL_OPC_LAST; opcode++) {
                        if (accel_module->supports_opcode(opcode)) {
                                info->ops[j] = opcode;
                                j++;
                        }
                }
                info->name = accel_module->name;
                info->num_ops = j;
                fn(info);
                j = 0;
        }
}

const char *
spdk_accel_get_opcode_name(enum spdk_accel_opcode opcode)
{
        if (opcode < SPDK_ACCEL_OPC_LAST) {
                return g_opcode_strings[opcode];
        }

        return NULL;
}

int
spdk_accel_assign_opc(enum spdk_accel_opcode opcode, const char *name)
{
        char *copy;

        if (g_modules_started == true) {
                /* we don't allow re-assignment once things have started */
                return -EINVAL;
        }

        if (opcode >= SPDK_ACCEL_OPC_LAST) {
                /* invalid opcode */
                return -EINVAL;
        }

        copy = strdup(name);
        if (copy == NULL) {
                return -ENOMEM;
        }

        /* module selection will be validated after the framework starts. */
        free(g_modules_opc_override[opcode]);
        g_modules_opc_override[opcode] = copy;

        return 0;
}
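/*
 * Example (illustrative sketch, not part of this file): an application can pin
 * an opcode to a specific module before the framework starts, e.g. to force
 * CRC-32C onto the software path. The name must match a registered module
 * (e.g. "software"); the assignment is validated once modules start.
 *
 *	rc = spdk_accel_assign_opc(SPDK_ACCEL_OPC_CRC32C, "software");
 *	if (rc != 0) {
 *		// called after startup, or the opcode was invalid
 *	}
 */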
inline static struct spdk_accel_task *
_get_task(struct accel_io_channel *accel_ch, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
        struct spdk_accel_task *accel_task;

        accel_task = STAILQ_FIRST(&accel_ch->task_pool);
        if (spdk_unlikely(accel_task == NULL)) {
                accel_update_stats(accel_ch, retry.task, 1);
                return NULL;
        }

        accel_update_stats(accel_ch, task_outstanding, 1);
        STAILQ_REMOVE_HEAD(&accel_ch->task_pool, link);
        accel_task->link.stqe_next = NULL;

        accel_task->cb_fn = cb_fn;
        accel_task->cb_arg = cb_arg;
        accel_task->accel_ch = accel_ch;
        accel_task->s.iovs = NULL;
        accel_task->d.iovs = NULL;

        return accel_task;
}

static void
_put_task(struct accel_io_channel *ch, struct spdk_accel_task *task)
{
        STAILQ_INSERT_HEAD(&ch->task_pool, task, link);
        accel_update_stats(ch, task_outstanding, -1);
}

void
spdk_accel_task_complete(struct spdk_accel_task *accel_task, int status)
{
        struct accel_io_channel *accel_ch = accel_task->accel_ch;
        spdk_accel_completion_cb cb_fn;
        void *cb_arg;

        accel_update_task_stats(accel_ch, accel_task, executed, 1);
        accel_update_task_stats(accel_ch, accel_task, num_bytes, accel_task->nbytes);
        if (spdk_unlikely(status != 0)) {
                accel_update_task_stats(accel_ch, accel_task, failed, 1);
        }

        if (accel_task->seq) {
                accel_sequence_task_cb(accel_task->seq, accel_task, status);
                return;
        }

        cb_fn = accel_task->cb_fn;
        cb_arg = accel_task->cb_arg;

        if (accel_task->has_aux) {
                SLIST_INSERT_HEAD(&accel_ch->task_aux_data_pool, accel_task->aux, link);
                accel_task->aux = NULL;
                accel_task->has_aux = false;
        }

        /* Return the accel_task to the pool first, so that the task list isn't
         * exhausted if the user's callback (cb_fn) recursively allocates another
         * accel_task.
         */
        _put_task(accel_ch, accel_task);

        cb_fn(cb_arg, status);
}

static inline int
accel_submit_task(struct accel_io_channel *accel_ch, struct spdk_accel_task *task)
{
        struct spdk_io_channel *module_ch = accel_ch->module_ch[task->op_code];
        struct spdk_accel_module_if *module = g_modules_opc[task->op_code].module;
        int rc;

        rc = module->submit_tasks(module_ch, task);
        if (spdk_unlikely(rc != 0)) {
                accel_update_task_stats(accel_ch, task, failed, 1);
        }

        return rc;
}

static inline uint64_t
accel_get_iovlen(struct iovec *iovs, uint32_t iovcnt)
{
        uint64_t result = 0;
        uint32_t i;

        for (i = 0; i < iovcnt; ++i) {
                result += iovs[i].iov_len;
        }

        return result;
}

#define ACCEL_TASK_ALLOC_AUX_BUF(task) \
do { \
        (task)->aux = SLIST_FIRST(&(task)->accel_ch->task_aux_data_pool); \
        if (spdk_unlikely(!(task)->aux)) { \
                SPDK_ERRLOG("Fatal problem, aux data was not allocated\n"); \
                _put_task(task->accel_ch, task); \
                assert(0); \
                return -ENOMEM; \
        } \
        SLIST_REMOVE_HEAD(&(task)->accel_ch->task_aux_data_pool, link); \
        (task)->has_aux = true; \
} while (0)
/* Accel framework public API for copy function */
int
spdk_accel_submit_copy(struct spdk_io_channel *ch, void *dst, void *src,
                       uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
        struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
        struct spdk_accel_task *accel_task;

        accel_task = _get_task(accel_ch, cb_fn, cb_arg);
        if (spdk_unlikely(accel_task == NULL)) {
                return -ENOMEM;
        }

        ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

        accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
        accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
        accel_task->d.iovs[0].iov_base = dst;
        accel_task->d.iovs[0].iov_len = nbytes;
        accel_task->d.iovcnt = 1;
        accel_task->s.iovs[0].iov_base = src;
        accel_task->s.iovs[0].iov_len = nbytes;
        accel_task->s.iovcnt = 1;
        accel_task->nbytes = nbytes;
        accel_task->op_code = SPDK_ACCEL_OPC_COPY;
        accel_task->src_domain = NULL;
        accel_task->dst_domain = NULL;

        return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for dualcast copy function */
int
spdk_accel_submit_dualcast(struct spdk_io_channel *ch, void *dst1,
                           void *dst2, void *src, uint64_t nbytes,
                           spdk_accel_completion_cb cb_fn, void *cb_arg)
{
        struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
        struct spdk_accel_task *accel_task;

        if ((uintptr_t)dst1 & (ALIGN_4K - 1) || (uintptr_t)dst2 & (ALIGN_4K - 1)) {
                SPDK_ERRLOG("Dualcast requires 4K alignment on dst addresses\n");
                return -EINVAL;
        }

        accel_task = _get_task(accel_ch, cb_fn, cb_arg);
        if (spdk_unlikely(accel_task == NULL)) {
                return -ENOMEM;
        }

        ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

        accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
        accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
        accel_task->d2.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST2];
        accel_task->d.iovs[0].iov_base = dst1;
        accel_task->d.iovs[0].iov_len = nbytes;
        accel_task->d.iovcnt = 1;
        accel_task->d2.iovs[0].iov_base = dst2;
        accel_task->d2.iovs[0].iov_len = nbytes;
        accel_task->d2.iovcnt = 1;
        accel_task->s.iovs[0].iov_base = src;
        accel_task->s.iovs[0].iov_len = nbytes;
        accel_task->s.iovcnt = 1;
        accel_task->nbytes = nbytes;
        accel_task->op_code = SPDK_ACCEL_OPC_DUALCAST;
        accel_task->src_domain = NULL;
        accel_task->dst_domain = NULL;

        return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for compare function */
int
spdk_accel_submit_compare(struct spdk_io_channel *ch, void *src1,
                          void *src2, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
                          void *cb_arg)
{
        struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
        struct spdk_accel_task *accel_task;

        accel_task = _get_task(accel_ch, cb_fn, cb_arg);
        if (spdk_unlikely(accel_task == NULL)) {
                return -ENOMEM;
        }

        ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

        accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
        accel_task->s2.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC2];
        accel_task->s.iovs[0].iov_base = src1;
        accel_task->s.iovs[0].iov_len = nbytes;
        accel_task->s.iovcnt = 1;
        accel_task->s2.iovs[0].iov_base = src2;
        accel_task->s2.iovs[0].iov_len = nbytes;
        accel_task->s2.iovcnt = 1;
        accel_task->nbytes = nbytes;
        accel_task->op_code = SPDK_ACCEL_OPC_COMPARE;
        accel_task->src_domain = NULL;
        accel_task->dst_domain = NULL;

        return accel_submit_task(accel_ch, accel_task);
}
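/*
 * Example (illustrative sketch, not part of this file): the submit APIs above
 * share one pattern - get the accel io_channel, submit with a completion
 * callback, and poll the thread until the callback fires. The names
 * "copy_done" and "ctx" below are placeholders.
 *
 *	static void
 *	copy_done(void *ctx, int status)
 *	{
 *		// status is 0 on success, negative errno otherwise
 *	}
 *
 *	struct spdk_io_channel *ch = spdk_accel_get_io_channel();
 *
 *	rc = spdk_accel_submit_copy(ch, dst, src, nbytes, copy_done, ctx);
 *	if (rc == -ENOMEM) {
 *		// task pool exhausted; retry later
 *	}
 */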
/* Accel framework public API for fill function */
int
spdk_accel_submit_fill(struct spdk_io_channel *ch, void *dst,
                       uint8_t fill, uint64_t nbytes,
                       spdk_accel_completion_cb cb_fn, void *cb_arg)
{
        struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
        struct spdk_accel_task *accel_task;

        accel_task = _get_task(accel_ch, cb_fn, cb_arg);
        if (spdk_unlikely(accel_task == NULL)) {
                return -ENOMEM;
        }

        ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

        accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
        accel_task->d.iovs[0].iov_base = dst;
        accel_task->d.iovs[0].iov_len = nbytes;
        accel_task->d.iovcnt = 1;
        accel_task->nbytes = nbytes;
        memset(&accel_task->fill_pattern, fill, sizeof(uint64_t));
        accel_task->op_code = SPDK_ACCEL_OPC_FILL;
        accel_task->src_domain = NULL;
        accel_task->dst_domain = NULL;

        return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for CRC-32C function */
int
spdk_accel_submit_crc32c(struct spdk_io_channel *ch, uint32_t *crc_dst,
                         void *src, uint32_t seed, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
                         void *cb_arg)
{
        struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
        struct spdk_accel_task *accel_task;

        accel_task = _get_task(accel_ch, cb_fn, cb_arg);
        if (spdk_unlikely(accel_task == NULL)) {
                return -ENOMEM;
        }

        ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

        accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
        accel_task->s.iovs[0].iov_base = src;
        accel_task->s.iovs[0].iov_len = nbytes;
        accel_task->s.iovcnt = 1;
        accel_task->nbytes = nbytes;
        accel_task->crc_dst = crc_dst;
        accel_task->seed = seed;
        accel_task->op_code = SPDK_ACCEL_OPC_CRC32C;
        accel_task->src_domain = NULL;
        accel_task->dst_domain = NULL;

        return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for chained CRC-32C function */
int
spdk_accel_submit_crc32cv(struct spdk_io_channel *ch, uint32_t *crc_dst,
                          struct iovec *iov, uint32_t iov_cnt, uint32_t seed,
                          spdk_accel_completion_cb cb_fn, void *cb_arg)
{
        struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
        struct spdk_accel_task *accel_task;

        if (iov == NULL) {
                SPDK_ERRLOG("iov should not be NULL\n");
                return -EINVAL;
        }

        if (!iov_cnt) {
                SPDK_ERRLOG("iovcnt should not be zero\n");
                return -EINVAL;
        }

        accel_task = _get_task(accel_ch, cb_fn, cb_arg);
        if (spdk_unlikely(accel_task == NULL)) {
                SPDK_ERRLOG("no memory\n");
                assert(0);
                return -ENOMEM;
        }

        accel_task->s.iovs = iov;
        accel_task->s.iovcnt = iov_cnt;
        accel_task->nbytes = accel_get_iovlen(iov, iov_cnt);
        accel_task->crc_dst = crc_dst;
        accel_task->seed = seed;
        accel_task->op_code = SPDK_ACCEL_OPC_CRC32C;
        accel_task->src_domain = NULL;
        accel_task->dst_domain = NULL;

        return accel_submit_task(accel_ch, accel_task);
}
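/*
 * Example (illustrative sketch): computing CRC-32C over scattered buffers with
 * the vectored variant. The iov contents and the "crc_done" callback are
 * placeholders.
 *
 *	struct iovec iovs[2] = {
 *		{ .iov_base = hdr, .iov_len = hdr_len },
 *		{ .iov_base = payload, .iov_len = payload_len },
 *	};
 *	uint32_t crc;
 *
 *	rc = spdk_accel_submit_crc32cv(ch, &crc, iovs, 2, ~0u, crc_done, ctx);
 *	// "crc" is valid only once crc_done runs with status == 0
 */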
/* Accel framework public API for copy with CRC-32C function */
int
spdk_accel_submit_copy_crc32c(struct spdk_io_channel *ch, void *dst,
                              void *src, uint32_t *crc_dst, uint32_t seed, uint64_t nbytes,
                              spdk_accel_completion_cb cb_fn, void *cb_arg)
{
        struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
        struct spdk_accel_task *accel_task;

        accel_task = _get_task(accel_ch, cb_fn, cb_arg);
        if (spdk_unlikely(accel_task == NULL)) {
                return -ENOMEM;
        }

        ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

        accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
        accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
        accel_task->d.iovs[0].iov_base = dst;
        accel_task->d.iovs[0].iov_len = nbytes;
        accel_task->d.iovcnt = 1;
        accel_task->s.iovs[0].iov_base = src;
        accel_task->s.iovs[0].iov_len = nbytes;
        accel_task->s.iovcnt = 1;
        accel_task->nbytes = nbytes;
        accel_task->crc_dst = crc_dst;
        accel_task->seed = seed;
        accel_task->op_code = SPDK_ACCEL_OPC_COPY_CRC32C;
        accel_task->src_domain = NULL;
        accel_task->dst_domain = NULL;

        return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for chained copy + CRC-32C function */
int
spdk_accel_submit_copy_crc32cv(struct spdk_io_channel *ch, void *dst,
                               struct iovec *src_iovs, uint32_t iov_cnt, uint32_t *crc_dst,
                               uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
        struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
        struct spdk_accel_task *accel_task;
        uint64_t nbytes;

        if (src_iovs == NULL) {
                SPDK_ERRLOG("iov should not be NULL\n");
                return -EINVAL;
        }

        if (!iov_cnt) {
                SPDK_ERRLOG("iovcnt should not be zero\n");
                return -EINVAL;
        }

        accel_task = _get_task(accel_ch, cb_fn, cb_arg);
        if (spdk_unlikely(accel_task == NULL)) {
                SPDK_ERRLOG("no memory\n");
                assert(0);
                return -ENOMEM;
        }

        nbytes = accel_get_iovlen(src_iovs, iov_cnt);

        ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

        accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
        accel_task->d.iovs[0].iov_base = dst;
        accel_task->d.iovs[0].iov_len = nbytes;
        accel_task->d.iovcnt = 1;
        accel_task->s.iovs = src_iovs;
        accel_task->s.iovcnt = iov_cnt;
        accel_task->nbytes = nbytes;
        accel_task->crc_dst = crc_dst;
        accel_task->seed = seed;
        accel_task->op_code = SPDK_ACCEL_OPC_COPY_CRC32C;
        accel_task->src_domain = NULL;
        accel_task->dst_domain = NULL;

        return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_get_compress_level_range(enum spdk_accel_comp_algo comp_algo,
                                    uint32_t *min_level, uint32_t *max_level)
{
        struct spdk_accel_module_if *module = g_modules_opc[SPDK_ACCEL_OPC_COMPRESS].module;

        if (module->get_compress_level_range == NULL) {
                SPDK_ERRLOG("Module %s doesn't implement callback fn get_compress_level_range.\n",
                            module->name);
                return -ENOTSUP;
        }

        return module->get_compress_level_range(comp_algo, min_level, max_level);
}

static int
_accel_check_comp_algo(enum spdk_accel_comp_algo comp_algo)
{
        struct spdk_accel_module_if *module = g_modules_opc[SPDK_ACCEL_OPC_COMPRESS].module;

        if (!module->compress_supports_algo || !module->compress_supports_algo(comp_algo)) {
                SPDK_ERRLOG("Module %s doesn't support compression algo %d\n", module->name, comp_algo);
                return -ENOTSUP;
        }

        return 0;
}
int
spdk_accel_submit_compress_ext(struct spdk_io_channel *ch, void *dst, uint64_t nbytes,
                               struct iovec *src_iovs, size_t src_iovcnt,
                               enum spdk_accel_comp_algo comp_algo, uint32_t comp_level,
                               uint32_t *output_size, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
        struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
        struct spdk_accel_task *accel_task;
        int rc;

        rc = _accel_check_comp_algo(comp_algo);
        if (spdk_unlikely(rc != 0)) {
                return rc;
        }

        accel_task = _get_task(accel_ch, cb_fn, cb_arg);
        if (spdk_unlikely(accel_task == NULL)) {
                return -ENOMEM;
        }

        ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

        accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
        accel_task->d.iovs[0].iov_base = dst;
        accel_task->d.iovs[0].iov_len = nbytes;
        accel_task->d.iovcnt = 1;
        accel_task->output_size = output_size;
        accel_task->s.iovs = src_iovs;
        accel_task->s.iovcnt = src_iovcnt;
        accel_task->nbytes = nbytes;
        accel_task->op_code = SPDK_ACCEL_OPC_COMPRESS;
        accel_task->src_domain = NULL;
        accel_task->dst_domain = NULL;
        accel_task->comp.algo = comp_algo;
        accel_task->comp.level = comp_level;

        return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_decompress_ext(struct spdk_io_channel *ch, struct iovec *dst_iovs,
                                 size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
                                 enum spdk_accel_comp_algo decomp_algo, uint32_t *output_size,
                                 spdk_accel_completion_cb cb_fn, void *cb_arg)
{
        struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
        struct spdk_accel_task *accel_task;
        int rc;

        rc = _accel_check_comp_algo(decomp_algo);
        if (spdk_unlikely(rc != 0)) {
                return rc;
        }

        accel_task = _get_task(accel_ch, cb_fn, cb_arg);
        if (spdk_unlikely(accel_task == NULL)) {
                return -ENOMEM;
        }

        accel_task->output_size = output_size;
        accel_task->s.iovs = src_iovs;
        accel_task->s.iovcnt = src_iovcnt;
        accel_task->d.iovs = dst_iovs;
        accel_task->d.iovcnt = dst_iovcnt;
        accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
        accel_task->op_code = SPDK_ACCEL_OPC_DECOMPRESS;
        accel_task->src_domain = NULL;
        accel_task->dst_domain = NULL;
        accel_task->comp.algo = decomp_algo;

        return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_compress(struct spdk_io_channel *ch, void *dst, uint64_t nbytes,
                           struct iovec *src_iovs, size_t src_iovcnt, uint32_t *output_size,
                           spdk_accel_completion_cb cb_fn, void *cb_arg)
{
        return spdk_accel_submit_compress_ext(ch, dst, nbytes, src_iovs, src_iovcnt,
                                              SPDK_ACCEL_COMP_ALGO_DEFLATE, 1, output_size, cb_fn, cb_arg);
}

int
spdk_accel_submit_decompress(struct spdk_io_channel *ch, struct iovec *dst_iovs,
                             size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
                             uint32_t *output_size, spdk_accel_completion_cb cb_fn,
                             void *cb_arg)
{
        return spdk_accel_submit_decompress_ext(ch, dst_iovs, dst_iovcnt, src_iovs, src_iovcnt,
                                                SPDK_ACCEL_COMP_ALGO_DEFLATE, output_size, cb_fn, cb_arg);
}
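/*
 * Example (illustrative sketch): compressing into a destination buffer while
 * capturing the compressed size. spdk_accel_submit_compress() defaults to
 * DEFLATE at level 1; the _ext variant selects the algorithm and level. The
 * "comp_done" callback and level 3 are placeholders.
 *
 *	uint32_t out_size;
 *
 *	rc = spdk_accel_submit_compress_ext(ch, dst, dst_len, src_iovs, src_iovcnt,
 *					    SPDK_ACCEL_COMP_ALGO_DEFLATE, 3,
 *					    &out_size, comp_done, ctx);
 *	// out_size holds the compressed length once comp_done reports success
 */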
int
spdk_accel_submit_encrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
                          struct iovec *dst_iovs, uint32_t dst_iovcnt,
                          struct iovec *src_iovs, uint32_t src_iovcnt,
                          uint64_t iv, uint32_t block_size,
                          spdk_accel_completion_cb cb_fn, void *cb_arg)
{
        struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
        struct spdk_accel_task *accel_task;

        if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
                return -EINVAL;
        }

        accel_task = _get_task(accel_ch, cb_fn, cb_arg);
        if (spdk_unlikely(accel_task == NULL)) {
                return -ENOMEM;
        }

        accel_task->crypto_key = key;
        accel_task->s.iovs = src_iovs;
        accel_task->s.iovcnt = src_iovcnt;
        accel_task->d.iovs = dst_iovs;
        accel_task->d.iovcnt = dst_iovcnt;
        accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
        accel_task->iv = iv;
        accel_task->block_size = block_size;
        accel_task->op_code = SPDK_ACCEL_OPC_ENCRYPT;
        accel_task->src_domain = NULL;
        accel_task->dst_domain = NULL;

        return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_decrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
                          struct iovec *dst_iovs, uint32_t dst_iovcnt,
                          struct iovec *src_iovs, uint32_t src_iovcnt,
                          uint64_t iv, uint32_t block_size,
                          spdk_accel_completion_cb cb_fn, void *cb_arg)
{
        struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
        struct spdk_accel_task *accel_task;

        if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
                return -EINVAL;
        }

        accel_task = _get_task(accel_ch, cb_fn, cb_arg);
        if (spdk_unlikely(accel_task == NULL)) {
                return -ENOMEM;
        }

        accel_task->crypto_key = key;
        accel_task->s.iovs = src_iovs;
        accel_task->s.iovcnt = src_iovcnt;
        accel_task->d.iovs = dst_iovs;
        accel_task->d.iovcnt = dst_iovcnt;
        accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
        accel_task->iv = iv;
        accel_task->block_size = block_size;
        accel_task->op_code = SPDK_ACCEL_OPC_DECRYPT;
        accel_task->src_domain = NULL;
        accel_task->dst_domain = NULL;

        return accel_submit_task(accel_ch, accel_task);
}
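/*
 * Example (illustrative sketch): encrypting one logical block. The key is
 * assumed to have been created beforehand and is looked up by name; "key0",
 * "lba", and "enc_done" are placeholders. With the default SIMPLE_LBA tweak
 * mode, the iv is typically the LBA being written.
 *
 *	struct spdk_accel_crypto_key *key = spdk_accel_crypto_key_get("key0");
 *
 *	if (key != NULL) {
 *		rc = spdk_accel_submit_encrypt(ch, key, dst_iovs, dst_iovcnt,
 *					       src_iovs, src_iovcnt, lba, 512,
 *					       enc_done, ctx);
 *	}
 */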
int
spdk_accel_submit_xor(struct spdk_io_channel *ch, void *dst, void **sources, uint32_t nsrcs,
                      uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
        struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
        struct spdk_accel_task *accel_task;

        accel_task = _get_task(accel_ch, cb_fn, cb_arg);
        if (spdk_unlikely(accel_task == NULL)) {
                return -ENOMEM;
        }

        ACCEL_TASK_ALLOC_AUX_BUF(accel_task);

        accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
        accel_task->nsrcs.srcs = sources;
        accel_task->nsrcs.cnt = nsrcs;
        accel_task->d.iovs[0].iov_base = dst;
        accel_task->d.iovs[0].iov_len = nbytes;
        accel_task->d.iovcnt = 1;
        accel_task->nbytes = nbytes;
        accel_task->op_code = SPDK_ACCEL_OPC_XOR;
        accel_task->src_domain = NULL;
        accel_task->dst_domain = NULL;

        return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_dif_verify(struct spdk_io_channel *ch,
                             struct iovec *iovs, size_t iovcnt, uint32_t num_blocks,
                             const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err,
                             spdk_accel_completion_cb cb_fn, void *cb_arg)
{
        struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
        struct spdk_accel_task *accel_task;

        accel_task = _get_task(accel_ch, cb_fn, cb_arg);
        if (accel_task == NULL) {
                return -ENOMEM;
        }

        accel_task->s.iovs = iovs;
        accel_task->s.iovcnt = iovcnt;
        accel_task->dif.ctx = ctx;
        accel_task->dif.err = err;
        accel_task->dif.num_blocks = num_blocks;
        accel_task->nbytes = num_blocks * ctx->block_size;
        accel_task->op_code = SPDK_ACCEL_OPC_DIF_VERIFY;
        accel_task->src_domain = NULL;
        accel_task->dst_domain = NULL;

        return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_dif_generate(struct spdk_io_channel *ch,
                               struct iovec *iovs, size_t iovcnt, uint32_t num_blocks,
                               const struct spdk_dif_ctx *ctx,
                               spdk_accel_completion_cb cb_fn, void *cb_arg)
{
        struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
        struct spdk_accel_task *accel_task;

        accel_task = _get_task(accel_ch, cb_fn, cb_arg);
        if (accel_task == NULL) {
                return -ENOMEM;
        }

        accel_task->s.iovs = iovs;
        accel_task->s.iovcnt = iovcnt;
        accel_task->dif.ctx = ctx;
        accel_task->dif.num_blocks = num_blocks;
        accel_task->nbytes = num_blocks * ctx->block_size;
        accel_task->op_code = SPDK_ACCEL_OPC_DIF_GENERATE;
        accel_task->src_domain = NULL;
        accel_task->dst_domain = NULL;

        return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_dif_generate_copy(struct spdk_io_channel *ch, struct iovec *dst_iovs,
                                    size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
                                    uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
                                    spdk_accel_completion_cb cb_fn, void *cb_arg)
{
        struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
        struct spdk_accel_task *accel_task;

        accel_task = _get_task(accel_ch, cb_fn, cb_arg);
        if (accel_task == NULL) {
                return -ENOMEM;
        }

        accel_task->s.iovs = src_iovs;
        accel_task->s.iovcnt = src_iovcnt;
        accel_task->d.iovs = dst_iovs;
        accel_task->d.iovcnt = dst_iovcnt;
        accel_task->dif.ctx = ctx;
        accel_task->dif.num_blocks = num_blocks;
        accel_task->nbytes = num_blocks * ctx->block_size;
        accel_task->op_code = SPDK_ACCEL_OPC_DIF_GENERATE_COPY;
        accel_task->src_domain = NULL;
        accel_task->dst_domain = NULL;

        return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_dif_verify_copy(struct spdk_io_channel *ch,
                                  struct iovec *dst_iovs, size_t dst_iovcnt,
                                  struct iovec *src_iovs, size_t src_iovcnt, uint32_t num_blocks,
                                  const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err,
                                  spdk_accel_completion_cb cb_fn, void *cb_arg)
{
        struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
        struct spdk_accel_task *accel_task;

        accel_task = _get_task(accel_ch, cb_fn, cb_arg);
        if (accel_task == NULL) {
                return -ENOMEM;
        }

        accel_task->s.iovs = src_iovs;
        accel_task->s.iovcnt = src_iovcnt;
        accel_task->d.iovs = dst_iovs;
        accel_task->d.iovcnt = dst_iovcnt;
        accel_task->dif.ctx = ctx;
        accel_task->dif.err = err;
        accel_task->dif.num_blocks = num_blocks;
        accel_task->nbytes = num_blocks * ctx->block_size;
        accel_task->op_code = SPDK_ACCEL_OPC_DIF_VERIFY_COPY;
        accel_task->src_domain = NULL;
        accel_task->dst_domain = NULL;

        return accel_submit_task(accel_ch, accel_task);
}
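/*
 * Example (illustrative sketch): verifying DIF on interleaved data. The
 * spdk_dif_ctx is assumed to have been initialized with spdk_dif_ctx_init()
 * (block size, DIF type, flags, etc.) before submission; "verify_done" is a
 * placeholder.
 *
 *	struct spdk_dif_error dif_err;
 *
 *	rc = spdk_accel_submit_dif_verify(ch, iovs, iovcnt, num_blocks,
 *					  &dif_ctx, &dif_err, verify_done, ctx);
 *	// on a guard/apptag/reftag mismatch the callback gets a non-zero
 *	// status and dif_err describes the failure
 */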
int
spdk_accel_submit_dix_generate(struct spdk_io_channel *ch, struct iovec *iovs,
                               size_t iovcnt, struct iovec *md_iov, uint32_t num_blocks,
                               const struct spdk_dif_ctx *ctx, spdk_accel_completion_cb cb_fn,
                               void *cb_arg)
{
        struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
        struct spdk_accel_task *accel_task;

        accel_task = _get_task(accel_ch, cb_fn, cb_arg);
        if (accel_task == NULL) {
                return -ENOMEM;
        }

        accel_task->s.iovs = iovs;
        accel_task->s.iovcnt = iovcnt;
        accel_task->d.iovs = md_iov;
        accel_task->d.iovcnt = 1;
        accel_task->dif.ctx = ctx;
        accel_task->dif.num_blocks = num_blocks;
        accel_task->nbytes = num_blocks * ctx->block_size;
        accel_task->op_code = SPDK_ACCEL_OPC_DIX_GENERATE;
        accel_task->src_domain = NULL;
        accel_task->dst_domain = NULL;

        return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_dix_verify(struct spdk_io_channel *ch, struct iovec *iovs,
                             size_t iovcnt, struct iovec *md_iov, uint32_t num_blocks,
                             const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err,
                             spdk_accel_completion_cb cb_fn, void *cb_arg)
{
        struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
        struct spdk_accel_task *accel_task;

        accel_task = _get_task(accel_ch, cb_fn, cb_arg);
        if (accel_task == NULL) {
                return -ENOMEM;
        }

        accel_task->s.iovs = iovs;
        accel_task->s.iovcnt = iovcnt;
        accel_task->d.iovs = md_iov;
        accel_task->d.iovcnt = 1;
        accel_task->dif.ctx = ctx;
        accel_task->dif.err = err;
        accel_task->dif.num_blocks = num_blocks;
        accel_task->nbytes = num_blocks * ctx->block_size;
        accel_task->op_code = SPDK_ACCEL_OPC_DIX_VERIFY;
        accel_task->src_domain = NULL;
        accel_task->dst_domain = NULL;

        return accel_submit_task(accel_ch, accel_task);
}

static inline struct accel_buffer *
accel_get_buf(struct accel_io_channel *ch, uint64_t len)
{
        struct accel_buffer *buf;

        buf = SLIST_FIRST(&ch->buf_pool);
        if (spdk_unlikely(buf == NULL)) {
                accel_update_stats(ch, retry.bufdesc, 1);
                return NULL;
        }

        SLIST_REMOVE_HEAD(&ch->buf_pool, link);
        buf->len = len;
        buf->buf = NULL;
        buf->seq = NULL;
        buf->cb_fn = NULL;

        return buf;
}

static inline void
accel_put_buf(struct accel_io_channel *ch, struct accel_buffer *buf)
{
        if (buf->buf != NULL) {
                spdk_iobuf_put(&ch->iobuf, buf->buf, buf->len);
        }

        SLIST_INSERT_HEAD(&ch->buf_pool, buf, link);
}
static inline struct spdk_accel_sequence *
accel_sequence_get(struct accel_io_channel *ch)
{
        struct spdk_accel_sequence *seq;

        assert(g_opts.task_count >= ch->stats.task_outstanding);

        /* A sequence cannot be allocated if the number of available task objects
         * cannot satisfy the required limit. This prevents a potential deadlock,
         * where several requests are each waiting for task resources and none of
         * them can advance the processing. This scheme works only as long as there
         * is at most a single async operation after a sequence object is obtained;
         * currently that is the io buffer allocation. If more async operations are
         * added, the scheme must be improved.
         */
        if (spdk_unlikely(g_opts.task_count - ch->stats.task_outstanding <
                          ACCEL_TASKS_IN_SEQUENCE_LIMIT)) {
                return NULL;
        }

        seq = SLIST_FIRST(&ch->seq_pool);
        if (spdk_unlikely(seq == NULL)) {
                accel_update_stats(ch, retry.sequence, 1);
                return NULL;
        }

        accel_update_stats(ch, sequence_outstanding, 1);
        SLIST_REMOVE_HEAD(&ch->seq_pool, link);

        TAILQ_INIT(&seq->tasks);
        SLIST_INIT(&seq->bounce_bufs);

        seq->ch = ch;
        seq->status = 0;
        seq->state = ACCEL_SEQUENCE_STATE_INIT;
        seq->in_process_sequence = false;

        return seq;
}

static inline void
accel_sequence_put(struct spdk_accel_sequence *seq)
{
        struct accel_io_channel *ch = seq->ch;
        struct accel_buffer *buf;

        while (!SLIST_EMPTY(&seq->bounce_bufs)) {
                buf = SLIST_FIRST(&seq->bounce_bufs);
                SLIST_REMOVE_HEAD(&seq->bounce_bufs, link);
                accel_put_buf(seq->ch, buf);
        }

        assert(TAILQ_EMPTY(&seq->tasks));
        seq->ch = NULL;

        SLIST_INSERT_HEAD(&ch->seq_pool, seq, link);
        accel_update_stats(ch, sequence_outstanding, -1);
}

static inline struct spdk_accel_task *
accel_sequence_get_task(struct accel_io_channel *ch, struct spdk_accel_sequence *seq,
                        spdk_accel_step_cb cb_fn, void *cb_arg)
{
        struct spdk_accel_task *task;

        task = _get_task(ch, NULL, NULL);
        if (spdk_unlikely(task == NULL)) {
                return task;
        }

        task->step_cb_fn = cb_fn;
        task->cb_arg = cb_arg;
        task->seq = seq;

        return task;
}

int
spdk_accel_append_copy(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
                       struct iovec *dst_iovs, uint32_t dst_iovcnt,
                       struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
                       struct iovec *src_iovs, uint32_t src_iovcnt,
                       struct spdk_memory_domain *src_domain, void *src_domain_ctx,
                       spdk_accel_step_cb cb_fn, void *cb_arg)
{
        struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
        struct spdk_accel_task *task;
        struct spdk_accel_sequence *seq = *pseq;

        if (seq == NULL) {
                seq = accel_sequence_get(accel_ch);
                if (spdk_unlikely(seq == NULL)) {
                        return -ENOMEM;
                }
        }

        assert(seq->ch == accel_ch);
        task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
        if (spdk_unlikely(task == NULL)) {
                if (*pseq == NULL) {
                        accel_sequence_put(seq);
                }

                return -ENOMEM;
        }

        task->dst_domain = dst_domain;
        task->dst_domain_ctx = dst_domain_ctx;
        task->d.iovs = dst_iovs;
        task->d.iovcnt = dst_iovcnt;
        task->src_domain = src_domain;
        task->src_domain_ctx = src_domain_ctx;
        task->s.iovs = src_iovs;
        task->s.iovcnt = src_iovcnt;
        task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
        task->op_code = SPDK_ACCEL_OPC_COPY;

        TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
        *pseq = seq;

        return 0;
}
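/*
 * Example (illustrative sketch): chaining operations instead of submitting
 * them one by one. Appends only build the sequence; nothing executes until
 * spdk_accel_sequence_finish() is called, which lets the framework elide
 * copies or hand the whole chain to a driver. "seq_done" is a placeholder.
 *
 *	struct spdk_accel_sequence *seq = NULL;
 *
 *	rc = spdk_accel_append_copy(&seq, ch, dst_iovs, dst_iovcnt, NULL, NULL,
 *				    src_iovs, src_iovcnt, NULL, NULL, NULL, NULL);
 *	if (rc == 0) {
 *		rc = spdk_accel_append_fill(&seq, ch, buf, len, NULL, NULL,
 *					    0xff, NULL, NULL);
 *	}
 *	if (rc == 0) {
 *		spdk_accel_sequence_finish(seq, seq_done, ctx);
 *	} else if (seq != NULL) {
 *		spdk_accel_sequence_abort(seq);
 *	}
 */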
int
spdk_accel_append_fill(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
                       void *buf, uint64_t len,
                       struct spdk_memory_domain *domain, void *domain_ctx, uint8_t pattern,
                       spdk_accel_step_cb cb_fn, void *cb_arg)
{
        struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
        struct spdk_accel_task *task;
        struct spdk_accel_sequence *seq = *pseq;

        if (seq == NULL) {
                seq = accel_sequence_get(accel_ch);
                if (spdk_unlikely(seq == NULL)) {
                        return -ENOMEM;
                }
        }

        assert(seq->ch == accel_ch);
        task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
        if (spdk_unlikely(task == NULL)) {
                if (*pseq == NULL) {
                        accel_sequence_put(seq);
                }

                return -ENOMEM;
        }

        memset(&task->fill_pattern, pattern, sizeof(uint64_t));

        task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
        if (spdk_unlikely(!task->aux)) {
                SPDK_ERRLOG("Fatal problem, aux data was not allocated\n");
                if (*pseq == NULL) {
                        accel_sequence_put(seq);
                }

                task->seq = NULL;
                _put_task(task->accel_ch, task);
                assert(0);
                return -ENOMEM;
        }
        SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
        task->has_aux = true;

        task->d.iovs = &task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
        task->d.iovs[0].iov_base = buf;
        task->d.iovs[0].iov_len = len;
        task->d.iovcnt = 1;
        task->nbytes = len;
        task->src_domain = NULL;
        task->dst_domain = domain;
        task->dst_domain_ctx = domain_ctx;
        task->op_code = SPDK_ACCEL_OPC_FILL;

        TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
        *pseq = seq;

        return 0;
}

int
spdk_accel_append_decompress(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
                             struct iovec *dst_iovs, size_t dst_iovcnt,
                             struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
                             struct iovec *src_iovs, size_t src_iovcnt,
                             struct spdk_memory_domain *src_domain, void *src_domain_ctx,
                             spdk_accel_step_cb cb_fn, void *cb_arg)
{
        return spdk_accel_append_decompress_ext(pseq, ch, dst_iovs, dst_iovcnt, dst_domain,
                                                dst_domain_ctx, src_iovs, src_iovcnt, src_domain,
                                                src_domain_ctx, SPDK_ACCEL_COMP_ALGO_DEFLATE,
                                                cb_fn, cb_arg);
}

int
spdk_accel_append_decompress_ext(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
                                 struct iovec *dst_iovs, size_t dst_iovcnt,
                                 struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
                                 struct iovec *src_iovs, size_t src_iovcnt,
                                 struct spdk_memory_domain *src_domain, void *src_domain_ctx,
                                 enum spdk_accel_comp_algo decomp_algo,
                                 spdk_accel_step_cb cb_fn, void *cb_arg)
{
        struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
        struct spdk_accel_task *task;
        struct spdk_accel_sequence *seq = *pseq;
        int rc;

        rc = _accel_check_comp_algo(decomp_algo);
        if (spdk_unlikely(rc != 0)) {
                return rc;
        }

        if (seq == NULL) {
                seq = accel_sequence_get(accel_ch);
                if (spdk_unlikely(seq == NULL)) {
                        return -ENOMEM;
                }
        }

        assert(seq->ch == accel_ch);
        task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
        if (spdk_unlikely(task == NULL)) {
                if (*pseq == NULL) {
                        accel_sequence_put(seq);
                }

                return -ENOMEM;
        }

        /* TODO: support output_size for chaining */
        task->output_size = NULL;
        task->dst_domain = dst_domain;
        task->dst_domain_ctx = dst_domain_ctx;
        task->d.iovs = dst_iovs;
        task->d.iovcnt = dst_iovcnt;
        task->src_domain = src_domain;
        task->src_domain_ctx = src_domain_ctx;
        task->s.iovs = src_iovs;
        task->s.iovcnt = src_iovcnt;
        task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
        task->op_code = SPDK_ACCEL_OPC_DECOMPRESS;
        task->comp.algo = decomp_algo;

        TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
        *pseq = seq;

        return 0;
}
int
spdk_accel_append_encrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
                          struct spdk_accel_crypto_key *key,
                          struct iovec *dst_iovs, uint32_t dst_iovcnt,
                          struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
                          struct iovec *src_iovs, uint32_t src_iovcnt,
                          struct spdk_memory_domain *src_domain, void *src_domain_ctx,
                          uint64_t iv, uint32_t block_size,
                          spdk_accel_step_cb cb_fn, void *cb_arg)
{
        struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
        struct spdk_accel_task *task;
        struct spdk_accel_sequence *seq = *pseq;

        assert(dst_iovs && dst_iovcnt && src_iovs && src_iovcnt && key && block_size);

        if (seq == NULL) {
                seq = accel_sequence_get(accel_ch);
                if (spdk_unlikely(seq == NULL)) {
                        return -ENOMEM;
                }
        }

        assert(seq->ch == accel_ch);
        task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
        if (spdk_unlikely(task == NULL)) {
                if (*pseq == NULL) {
                        accel_sequence_put(seq);
                }

                return -ENOMEM;
        }

        task->crypto_key = key;
        task->src_domain = src_domain;
        task->src_domain_ctx = src_domain_ctx;
        task->s.iovs = src_iovs;
        task->s.iovcnt = src_iovcnt;
        task->dst_domain = dst_domain;
        task->dst_domain_ctx = dst_domain_ctx;
        task->d.iovs = dst_iovs;
        task->d.iovcnt = dst_iovcnt;
        task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
        task->iv = iv;
        task->block_size = block_size;
        task->op_code = SPDK_ACCEL_OPC_ENCRYPT;

        TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
        *pseq = seq;

        return 0;
}

int
spdk_accel_append_decrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
                          struct spdk_accel_crypto_key *key,
                          struct iovec *dst_iovs, uint32_t dst_iovcnt,
                          struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
                          struct iovec *src_iovs, uint32_t src_iovcnt,
                          struct spdk_memory_domain *src_domain, void *src_domain_ctx,
                          uint64_t iv, uint32_t block_size,
                          spdk_accel_step_cb cb_fn, void *cb_arg)
{
        struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
        struct spdk_accel_task *task;
        struct spdk_accel_sequence *seq = *pseq;

        assert(dst_iovs && dst_iovcnt && src_iovs && src_iovcnt && key && block_size);

        if (seq == NULL) {
                seq = accel_sequence_get(accel_ch);
                if (spdk_unlikely(seq == NULL)) {
                        return -ENOMEM;
                }
        }

        assert(seq->ch == accel_ch);
        task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
        if (spdk_unlikely(task == NULL)) {
                if (*pseq == NULL) {
                        accel_sequence_put(seq);
                }

                return -ENOMEM;
        }

        task->crypto_key = key;
        task->src_domain = src_domain;
        task->src_domain_ctx = src_domain_ctx;
        task->s.iovs = src_iovs;
        task->s.iovcnt = src_iovcnt;
        task->dst_domain = dst_domain;
        task->dst_domain_ctx = dst_domain_ctx;
        task->d.iovs = dst_iovs;
        task->d.iovcnt = dst_iovcnt;
        task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
        task->iv = iv;
        task->block_size = block_size;
        task->op_code = SPDK_ACCEL_OPC_DECRYPT;

        TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
        *pseq = seq;

        return 0;
}
int
spdk_accel_append_crc32c(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
                         uint32_t *dst, struct iovec *iovs, uint32_t iovcnt,
                         struct spdk_memory_domain *domain, void *domain_ctx,
                         uint32_t seed, spdk_accel_step_cb cb_fn, void *cb_arg)
{
        struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
        struct spdk_accel_task *task;
        struct spdk_accel_sequence *seq = *pseq;

        if (seq == NULL) {
                seq = accel_sequence_get(accel_ch);
                if (spdk_unlikely(seq == NULL)) {
                        return -ENOMEM;
                }
        }

        assert(seq->ch == accel_ch);
        task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
        if (spdk_unlikely(task == NULL)) {
                if (*pseq == NULL) {
                        accel_sequence_put(seq);
                }

                return -ENOMEM;
        }

        task->s.iovs = iovs;
        task->s.iovcnt = iovcnt;
        task->src_domain = domain;
        task->src_domain_ctx = domain_ctx;
        task->nbytes = accel_get_iovlen(iovs, iovcnt);
        task->crc_dst = dst;
        task->seed = seed;
        task->op_code = SPDK_ACCEL_OPC_CRC32C;
        task->dst_domain = NULL;

        TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
        *pseq = seq;

        return 0;
}

int
spdk_accel_append_dif_verify(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
                             struct iovec *iovs, size_t iovcnt,
                             struct spdk_memory_domain *domain, void *domain_ctx,
                             uint32_t num_blocks,
                             const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err,
                             spdk_accel_step_cb cb_fn, void *cb_arg)
{
        struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
        struct spdk_accel_task *task;
        struct spdk_accel_sequence *seq = *pseq;

        if (seq == NULL) {
                seq = accel_sequence_get(accel_ch);
                if (spdk_unlikely(seq == NULL)) {
                        return -ENOMEM;
                }
        }

        assert(seq->ch == accel_ch);
        task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
        if (spdk_unlikely(task == NULL)) {
                if (*pseq == NULL) {
                        accel_sequence_put(seq);
                }

                return -ENOMEM;
        }

        task->s.iovs = iovs;
        task->s.iovcnt = iovcnt;
        task->src_domain = domain;
        task->src_domain_ctx = domain_ctx;
        task->dst_domain = NULL;
        task->dif.ctx = ctx;
        task->dif.err = err;
        task->dif.num_blocks = num_blocks;
        task->nbytes = num_blocks * ctx->block_size;
        task->op_code = SPDK_ACCEL_OPC_DIF_VERIFY;

        TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
        *pseq = seq;

        return 0;
}
int
spdk_accel_append_dif_verify_copy(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
                                  struct iovec *dst_iovs, size_t dst_iovcnt,
                                  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
                                  struct iovec *src_iovs, size_t src_iovcnt,
                                  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
                                  uint32_t num_blocks,
                                  const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err,
                                  spdk_accel_step_cb cb_fn, void *cb_arg)
{
        struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
        struct spdk_accel_task *task;
        struct spdk_accel_sequence *seq = *pseq;

        if (seq == NULL) {
                seq = accel_sequence_get(accel_ch);
                if (spdk_unlikely(seq == NULL)) {
                        return -ENOMEM;
                }
        }

        assert(seq->ch == accel_ch);
        task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
        if (spdk_unlikely(task == NULL)) {
                if (*pseq == NULL) {
                        accel_sequence_put(seq);
                }

                return -ENOMEM;
        }

        task->dst_domain = dst_domain;
        task->dst_domain_ctx = dst_domain_ctx;
        task->d.iovs = dst_iovs;
        task->d.iovcnt = dst_iovcnt;
        task->src_domain = src_domain;
        task->src_domain_ctx = src_domain_ctx;
        task->s.iovs = src_iovs;
        task->s.iovcnt = src_iovcnt;
        task->dif.ctx = ctx;
        task->dif.err = err;
        task->dif.num_blocks = num_blocks;
        task->nbytes = num_blocks * ctx->block_size;
        task->op_code = SPDK_ACCEL_OPC_DIF_VERIFY_COPY;

        TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
        *pseq = seq;

        return 0;
}

int
spdk_accel_append_dif_generate(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
                               struct iovec *iovs, size_t iovcnt,
                               struct spdk_memory_domain *domain, void *domain_ctx,
                               uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
                               spdk_accel_step_cb cb_fn, void *cb_arg)
{
        struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
        struct spdk_accel_task *task;
        struct spdk_accel_sequence *seq = *pseq;

        if (seq == NULL) {
                seq = accel_sequence_get(accel_ch);
                if (spdk_unlikely(seq == NULL)) {
                        return -ENOMEM;
                }
        }

        assert(seq->ch == accel_ch);
        task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
        if (spdk_unlikely(task == NULL)) {
                if (*pseq == NULL) {
                        accel_sequence_put(seq);
                }

                return -ENOMEM;
        }

        task->s.iovs = iovs;
        task->s.iovcnt = iovcnt;
        task->src_domain = domain;
        task->src_domain_ctx = domain_ctx;
        task->dst_domain = NULL;
        task->dif.ctx = ctx;
        task->dif.num_blocks = num_blocks;
        task->nbytes = num_blocks * ctx->block_size;
        task->op_code = SPDK_ACCEL_OPC_DIF_GENERATE;

        TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
        *pseq = seq;

        return 0;
}

int
spdk_accel_append_dif_generate_copy(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
                                    struct iovec *dst_iovs, size_t dst_iovcnt,
                                    struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
                                    struct iovec *src_iovs, size_t src_iovcnt,
                                    struct spdk_memory_domain *src_domain, void *src_domain_ctx,
                                    uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
                                    spdk_accel_step_cb cb_fn, void *cb_arg)
{
        struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
        struct spdk_accel_task *task;
        struct spdk_accel_sequence *seq = *pseq;

        if (seq == NULL) {
                seq = accel_sequence_get(accel_ch);
                if (spdk_unlikely(seq == NULL)) {
                        return -ENOMEM;
                }
        }

        assert(seq->ch == accel_ch);
        task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
        if (spdk_unlikely(task == NULL)) {
                if (*pseq == NULL) {
                        accel_sequence_put(seq);
                }

                return -ENOMEM;
        }

        task->dst_domain = dst_domain;
        task->dst_domain_ctx = dst_domain_ctx;
        task->d.iovs = dst_iovs;
        task->d.iovcnt = dst_iovcnt;
        task->src_domain = src_domain;
        task->src_domain_ctx = src_domain_ctx;
        task->s.iovs = src_iovs;
        task->s.iovcnt = src_iovcnt;
        task->dif.ctx = ctx;
        task->dif.num_blocks = num_blocks;
        task->nbytes = num_blocks * ctx->block_size;
        task->op_code = SPDK_ACCEL_OPC_DIF_GENERATE_COPY;

        TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
        *pseq = seq;

        return 0;
}
int
spdk_accel_append_dix_generate(struct spdk_accel_sequence **seq, struct spdk_io_channel *ch,
                               struct iovec *iovs, size_t iovcnt, struct spdk_memory_domain *domain,
                               void *domain_ctx, struct iovec *md_iov,
                               struct spdk_memory_domain *md_domain, void *md_domain_ctx,
                               uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
                               spdk_accel_step_cb cb_fn, void *cb_arg)
{
        struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
        struct spdk_accel_task *task;
        struct spdk_accel_sequence *pseq = *seq;

        if (pseq == NULL) {
                pseq = accel_sequence_get(accel_ch);
                if (spdk_unlikely(pseq == NULL)) {
                        return -ENOMEM;
                }
        }

        assert(pseq->ch == accel_ch);
        task = accel_sequence_get_task(accel_ch, pseq, cb_fn, cb_arg);
        if (spdk_unlikely(task == NULL)) {
                if (*seq == NULL) {
                        accel_sequence_put(pseq);
                }

                return -ENOMEM;
        }

        task->d.iovs = md_iov;
        task->d.iovcnt = 1;
        task->dst_domain = md_domain;
        task->dst_domain_ctx = md_domain_ctx;
        task->s.iovs = iovs;
        task->s.iovcnt = iovcnt;
        task->src_domain = domain;
        task->src_domain_ctx = domain_ctx;
        task->dif.ctx = ctx;
        task->dif.num_blocks = num_blocks;
        task->nbytes = num_blocks * ctx->block_size;
        task->op_code = SPDK_ACCEL_OPC_DIX_GENERATE;

        TAILQ_INSERT_TAIL(&pseq->tasks, task, seq_link);
        *seq = pseq;

        return 0;
}

int
spdk_accel_append_dix_verify(struct spdk_accel_sequence **seq, struct spdk_io_channel *ch,
                             struct iovec *iovs, size_t iovcnt, struct spdk_memory_domain *domain,
                             void *domain_ctx, struct iovec *md_iov,
                             struct spdk_memory_domain *md_domain, void *md_domain_ctx,
                             uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
                             struct spdk_dif_error *err, spdk_accel_step_cb cb_fn, void *cb_arg)
{
        struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
        struct spdk_accel_task *task;
        struct spdk_accel_sequence *pseq = *seq;

        if (pseq == NULL) {
                pseq = accel_sequence_get(accel_ch);
                if (spdk_unlikely(pseq == NULL)) {
                        return -ENOMEM;
                }
        }

        assert(pseq->ch == accel_ch);
        task = accel_sequence_get_task(accel_ch, pseq, cb_fn, cb_arg);
        if (spdk_unlikely(task == NULL)) {
                if (*seq == NULL) {
                        accel_sequence_put(pseq);
                }

                return -ENOMEM;
        }

        task->d.iovs = md_iov;
        task->d.iovcnt = 1;
        task->dst_domain = md_domain;
        task->dst_domain_ctx = md_domain_ctx;
        task->s.iovs = iovs;
        task->s.iovcnt = iovcnt;
        task->src_domain = domain;
        task->src_domain_ctx = domain_ctx;
        task->dif.ctx = ctx;
        task->dif.err = err;
        task->dif.num_blocks = num_blocks;
        task->nbytes = num_blocks * ctx->block_size;
        task->op_code = SPDK_ACCEL_OPC_DIX_VERIFY;

        TAILQ_INSERT_TAIL(&pseq->tasks, task, seq_link);
        *seq = pseq;

        return 0;
}

int
spdk_accel_get_buf(struct spdk_io_channel *ch, uint64_t len, void **buf,
                   struct spdk_memory_domain **domain, void **domain_ctx)
{
        struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
        struct accel_buffer *accel_buf;

        accel_buf = accel_get_buf(accel_ch, len);
        if (spdk_unlikely(accel_buf == NULL)) {
                return -ENOMEM;
        }

        accel_buf->ch = accel_ch;

        /* We always return the same pointer and identify the buffers through domain_ctx */
        *buf = ACCEL_BUFFER_BASE;
        *domain_ctx = accel_buf;
        *domain = g_accel_domain;

        return 0;
}
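/*
 * Example (illustrative sketch): obtaining a framework-owned scratch buffer
 * for use inside a sequence. The returned pointer is the opaque
 * ACCEL_BUFFER_BASE marker, meaningful only together with the returned
 * domain/domain_ctx; real memory is allocated lazily when the sequence
 * executes.
 *
 *	void *buf;
 *	struct spdk_memory_domain *domain;
 *	void *domain_ctx;
 *
 *	rc = spdk_accel_get_buf(ch, 4096, &buf, &domain, &domain_ctx);
 *	if (rc == 0) {
 *		// pass buf/domain/domain_ctx to spdk_accel_append_*()
 *		spdk_accel_put_buf(ch, buf, domain, domain_ctx);
 *	}
 */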
void
spdk_accel_put_buf(struct spdk_io_channel *ch, void *buf,
		   struct spdk_memory_domain *domain, void *domain_ctx)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct accel_buffer *accel_buf = domain_ctx;

	assert(domain == g_accel_domain);
	assert(buf == ACCEL_BUFFER_BASE);

	accel_put_buf(accel_ch, accel_buf);
}

static void
accel_sequence_complete_task(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	struct accel_io_channel *ch = seq->ch;
	spdk_accel_step_cb cb_fn;
	void *cb_arg;

	TAILQ_REMOVE(&seq->tasks, task, seq_link);
	cb_fn = task->step_cb_fn;
	cb_arg = task->cb_arg;
	task->seq = NULL;
	if (task->has_aux) {
		SLIST_INSERT_HEAD(&ch->task_aux_data_pool, task->aux, link);
		task->aux = NULL;
		task->has_aux = false;
	}

	_put_task(ch, task);

	if (cb_fn != NULL) {
		cb_fn(cb_arg);
	}
}

static void
accel_sequence_complete_tasks(struct spdk_accel_sequence *seq)
{
	struct spdk_accel_task *task;

	while (!TAILQ_EMPTY(&seq->tasks)) {
		task = TAILQ_FIRST(&seq->tasks);
		accel_sequence_complete_task(seq, task);
	}
}

static void
accel_sequence_complete(struct spdk_accel_sequence *seq)
{
	spdk_accel_completion_cb cb_fn = seq->cb_fn;
	void *cb_arg = seq->cb_arg;
	int status = seq->status;

	SPDK_DEBUGLOG(accel, "Completed sequence: %p with status: %d\n", seq, status);

	accel_update_stats(seq->ch, sequence_executed, 1);
	if (spdk_unlikely(status != 0)) {
		accel_update_stats(seq->ch, sequence_failed, 1);
	}

	/* First notify all users that appended operations to this sequence */
	accel_sequence_complete_tasks(seq);
	accel_sequence_put(seq);

	/* Then notify the user that finished the sequence */
	cb_fn(cb_arg, status);
}

static void
accel_update_virt_iov(struct iovec *diov, struct iovec *siov, struct accel_buffer *accel_buf)
{
	uintptr_t offset;

	offset = (uintptr_t)siov->iov_base & ACCEL_BUFFER_OFFSET_MASK;
	assert(offset < accel_buf->len);

	diov->iov_base = (char *)accel_buf->buf + offset;
	diov->iov_len = siov->iov_len;
}

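/*
 * Worked example of the offset arithmetic above (values are hypothetical):
 * with ACCEL_BUFFER_BASE = 1 << 63, a virtual iov_base of
 * ACCEL_BUFFER_BASE + 0x200 masks down to offset 0x200, so the translated
 * iov_base becomes accel_buf->buf + 0x200, while iov_len is carried over
 * unchanged.
 */
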
static void
accel_sequence_set_virtbuf(struct spdk_accel_sequence *seq, struct accel_buffer *buf)
{
	struct spdk_accel_task *task;
	struct iovec *iov;

	/* Now that we've allocated the actual data buffer for this accel_buffer, update all tasks
	 * in a sequence that were using it.
	 */
	TAILQ_FOREACH(task, &seq->tasks, seq_link) {
		if (task->src_domain == g_accel_domain && task->src_domain_ctx == buf) {
			if (!task->has_aux) {
				task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
				assert(task->aux && "Can't allocate aux data structure");
				task->has_aux = true;
				SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
			}

			iov = &task->aux->iovs[SPDK_ACCEL_AXU_IOV_VIRT_SRC];
			assert(task->s.iovcnt == 1);
			accel_update_virt_iov(iov, &task->s.iovs[0], buf);
			task->src_domain = NULL;
			task->s.iovs = iov;
		}
		if (task->dst_domain == g_accel_domain && task->dst_domain_ctx == buf) {
			if (!task->has_aux) {
				task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
				assert(task->aux && "Can't allocate aux data structure");
				task->has_aux = true;
				SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
			}

			iov = &task->aux->iovs[SPDK_ACCEL_AXU_IOV_VIRT_DST];
			assert(task->d.iovcnt == 1);
			accel_update_virt_iov(iov, &task->d.iovs[0], buf);
			task->dst_domain = NULL;
			task->d.iovs = iov;
		}
	}
}

static void accel_process_sequence(struct spdk_accel_sequence *seq);

static void
accel_iobuf_get_virtbuf_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);

	assert(accel_buf->seq != NULL);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
	accel_process_sequence(accel_buf->seq);
}

static bool
accel_sequence_alloc_buf(struct spdk_accel_sequence *seq, struct accel_buffer *buf,
			 spdk_iobuf_get_cb cb_fn)
{
	struct accel_io_channel *ch = seq->ch;

	assert(buf->seq == NULL);

	buf->seq = seq;

	/* Buffer might be already allocated by memory domain translation. */
	if (buf->buf) {
		return true;
	}

	buf->buf = spdk_iobuf_get(&ch->iobuf, buf->len, &buf->iobuf, cb_fn);
	if (spdk_unlikely(buf->buf == NULL)) {
		accel_update_stats(ch, retry.iobuf, 1);
		return false;
	}

	return true;
}

static bool
accel_sequence_check_virtbuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	/* If a task doesn't have dst/src (e.g. fill, crc32), its dst/src domain should be set to
	 * NULL */
	if (task->src_domain == g_accel_domain) {
		if (!accel_sequence_alloc_buf(seq, task->src_domain_ctx,
					      accel_iobuf_get_virtbuf_cb)) {
			return false;
		}

		accel_sequence_set_virtbuf(seq, task->src_domain_ctx);
	}

	if (task->dst_domain == g_accel_domain) {
		if (!accel_sequence_alloc_buf(seq, task->dst_domain_ctx,
					      accel_iobuf_get_virtbuf_cb)) {
			return false;
		}

		accel_sequence_set_virtbuf(seq, task->dst_domain_ctx);
	}

	return true;
}

static void
accel_sequence_get_buf_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);

	assert(accel_buf->seq != NULL);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
	accel_buf->cb_fn(accel_buf->seq, accel_buf->cb_ctx);
}

bool
spdk_accel_alloc_sequence_buf(struct spdk_accel_sequence *seq, void *buf,
			      struct spdk_memory_domain *domain, void *domain_ctx,
			      spdk_accel_sequence_get_buf_cb cb_fn, void *cb_ctx)
{
	struct accel_buffer *accel_buf = domain_ctx;

	assert(domain == g_accel_domain);
	accel_buf->cb_fn = cb_fn;
	accel_buf->cb_ctx = cb_ctx;

	if (!accel_sequence_alloc_buf(seq, accel_buf, accel_sequence_get_buf_cb)) {
		return false;
	}

	accel_sequence_set_virtbuf(seq, accel_buf);

	return true;
}

struct spdk_accel_task *
spdk_accel_sequence_first_task(struct spdk_accel_sequence *seq)
{
	return TAILQ_FIRST(&seq->tasks);
}

struct spdk_accel_task *
spdk_accel_sequence_next_task(struct spdk_accel_task *task)
{
	return TAILQ_NEXT(task, seq_link);
}

static inline void
accel_set_bounce_buffer(struct spdk_accel_bounce_buffer *bounce, struct iovec **iovs,
			uint32_t *iovcnt, struct spdk_memory_domain **domain, void **domain_ctx,
			struct accel_buffer *buf)
{
	bounce->orig_iovs = *iovs;
	bounce->orig_iovcnt = *iovcnt;
	bounce->orig_domain = *domain;
	bounce->orig_domain_ctx = *domain_ctx;
	bounce->iov.iov_base = buf->buf;
	bounce->iov.iov_len = buf->len;

	*iovs = &bounce->iov;
	*iovcnt = 1;
	*domain = NULL;
}

static void
accel_iobuf_get_src_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct spdk_accel_task *task;
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	task = TAILQ_FIRST(&accel_buf->seq->tasks);
	assert(task != NULL);

	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
	assert(task->aux);
	assert(task->has_aux);
	accel_set_bounce_buffer(&task->aux->bounce.s, &task->s.iovs, &task->s.iovcnt, &task->src_domain,
				&task->src_domain_ctx, accel_buf);
	accel_process_sequence(accel_buf->seq);
}

static void
accel_iobuf_get_dst_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct spdk_accel_task *task;
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	task = TAILQ_FIRST(&accel_buf->seq->tasks);
	assert(task != NULL);

	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
	assert(task->aux);
	assert(task->has_aux);
	accel_set_bounce_buffer(&task->aux->bounce.d, &task->d.iovs, &task->d.iovcnt, &task->dst_domain,
				&task->dst_domain_ctx, accel_buf);
	accel_process_sequence(accel_buf->seq);
}

static int
accel_sequence_check_bouncebuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	struct accel_buffer *buf;

	if (task->src_domain != NULL) {
		/* By the time we're here, accel buffers should have been allocated */
		assert(task->src_domain != g_accel_domain);

		if (!task->has_aux) {
			task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
			if (spdk_unlikely(!task->aux)) {
				SPDK_ERRLOG("Can't allocate aux data structure\n");
				assert(0);
				return -EAGAIN;
			}
			task->has_aux = true;
			SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
		}
		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->s.iovs, task->s.iovcnt));
		if (buf == NULL) {
			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
			return -ENOMEM;
		}

		SLIST_INSERT_HEAD(&seq->bounce_bufs, buf, link);
		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_src_bounce_cb)) {
			return -EAGAIN;
		}

		accel_set_bounce_buffer(&task->aux->bounce.s, &task->s.iovs, &task->s.iovcnt,
					&task->src_domain, &task->src_domain_ctx, buf);
	}

	if (task->dst_domain != NULL) {
		/* By the time we're here, accel buffers should have been allocated */
		assert(task->dst_domain != g_accel_domain);

		if (!task->has_aux) {
			task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
			if (spdk_unlikely(!task->aux)) {
				SPDK_ERRLOG("Can't allocate aux data structure\n");
				assert(0);
				return -EAGAIN;
			}
			task->has_aux = true;
			SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
		}
		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->d.iovs, task->d.iovcnt));
		if (buf == NULL) {
			/* The src buffer will be released when a sequence is completed */
			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
			return -ENOMEM;
		}

		SLIST_INSERT_HEAD(&seq->bounce_bufs, buf, link);
		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_dst_bounce_cb)) {
			return -EAGAIN;
		}

		accel_set_bounce_buffer(&task->aux->bounce.d, &task->d.iovs, &task->d.iovcnt,
					&task->dst_domain, &task->dst_domain_ctx, buf);
	}

	return 0;
}

static void
accel_task_pull_data_cb(void *ctx, int status)
{
	struct spdk_accel_sequence *seq = ctx;

	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
	if (spdk_likely(status == 0)) {
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
	} else {
		accel_sequence_set_fail(seq, status);
	}

	accel_process_sequence(seq);
}

static void
accel_task_pull_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	int rc;

	assert(task->has_aux);
	assert(task->aux);
	assert(task->aux->bounce.s.orig_iovs != NULL);
	assert(task->aux->bounce.s.orig_domain != NULL);
	assert(task->aux->bounce.s.orig_domain != g_accel_domain);
	assert(!g_modules_opc[task->op_code].supports_memory_domains);

	rc = spdk_memory_domain_pull_data(task->aux->bounce.s.orig_domain,
					  task->aux->bounce.s.orig_domain_ctx,
					  task->aux->bounce.s.orig_iovs, task->aux->bounce.s.orig_iovcnt,
					  task->s.iovs, task->s.iovcnt,
					  accel_task_pull_data_cb, seq);
	if (spdk_unlikely(rc != 0)) {
		SPDK_ERRLOG("Failed to pull data from memory domain: %s, rc: %d\n",
			    spdk_memory_domain_get_dma_device_id(task->aux->bounce.s.orig_domain), rc);
		accel_sequence_set_fail(seq, rc);
	}
}

static void
accel_task_push_data_cb(void *ctx, int status)
{
	struct spdk_accel_sequence *seq = ctx;

	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
	if (spdk_likely(status == 0)) {
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
	} else {
		accel_sequence_set_fail(seq, status);
	}

	accel_process_sequence(seq);
}

static void
accel_task_push_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	int rc;

	assert(task->has_aux);
	assert(task->aux);
	assert(task->aux->bounce.d.orig_iovs != NULL);
	assert(task->aux->bounce.d.orig_domain != NULL);
	assert(task->aux->bounce.d.orig_domain != g_accel_domain);
	assert(!g_modules_opc[task->op_code].supports_memory_domains);

	rc = spdk_memory_domain_push_data(task->aux->bounce.d.orig_domain,
					  task->aux->bounce.d.orig_domain_ctx,
					  task->aux->bounce.d.orig_iovs, task->aux->bounce.d.orig_iovcnt,
					  task->d.iovs, task->d.iovcnt,
					  accel_task_push_data_cb, seq);
	if (spdk_unlikely(rc != 0)) {
		SPDK_ERRLOG("Failed to push data to memory domain: %s, rc: %d\n",
			    spdk_memory_domain_get_dma_device_id(task->aux->bounce.d.orig_domain), rc);
		accel_sequence_set_fail(seq, rc);
	}
}

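/*
 * Sketch of the bounce-buffer data flow implemented above, for a module
 * without memory-domain support (a summary, not additional behavior):
 *
 *	source domain --pull--> bounce src iov --module op--> bounce dst iov
 *	                                       --push--> destination domain
 *
 * Pull happens before EXEC_TASK only when the task reads from a foreign
 * domain; push happens after COMPLETE_TASK only when the task wrote into a
 * bounce destination.
 */
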
static void
accel_process_sequence(struct spdk_accel_sequence *seq)
{
	struct accel_io_channel *accel_ch = seq->ch;
	struct spdk_accel_task *task;
	enum accel_sequence_state state;
	int rc;

	/* Prevent recursive calls to this function */
	if (spdk_unlikely(seq->in_process_sequence)) {
		return;
	}
	seq->in_process_sequence = true;

	task = TAILQ_FIRST(&seq->tasks);
	do {
		state = seq->state;
		switch (state) {
		case ACCEL_SEQUENCE_STATE_INIT:
			if (g_accel_driver != NULL) {
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS);
				break;
			}
		/* Fall through */
		case ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
			if (!accel_sequence_check_virtbuf(seq, task)) {
				/* We couldn't allocate a buffer, wait until one is available */
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
		/* Fall through */
		case ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF:
			/* If a module supports memory domains, we don't need to allocate bounce
			 * buffers */
			if (g_modules_opc[task->op_code].supports_memory_domains) {
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
			rc = accel_sequence_check_bouncebuf(seq, task);
			if (spdk_unlikely(rc != 0)) {
				/* We couldn't allocate a buffer, wait until one is available */
				if (rc == -EAGAIN) {
					break;
				}
				accel_sequence_set_fail(seq, rc);
				break;
			}
			if (task->has_aux && task->s.iovs == &task->aux->bounce.s.iov) {
				assert(task->aux->bounce.s.orig_iovs);
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PULL_DATA);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
		/* Fall through */
		case ACCEL_SEQUENCE_STATE_EXEC_TASK:
			SPDK_DEBUGLOG(accel, "Executing %s operation, sequence: %p\n",
				      g_opcode_strings[task->op_code], seq);

			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_TASK);
			rc = accel_submit_task(accel_ch, task);
			if (spdk_unlikely(rc != 0)) {
				SPDK_ERRLOG("Failed to submit %s operation, sequence: %p\n",
					    g_opcode_strings[task->op_code], seq);
				accel_sequence_set_fail(seq, rc);
			}
			break;
		case ACCEL_SEQUENCE_STATE_PULL_DATA:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
			accel_task_pull_data(seq, task);
			break;
		case ACCEL_SEQUENCE_STATE_COMPLETE_TASK:
			if (task->has_aux && task->d.iovs == &task->aux->bounce.d.iov) {
				assert(task->aux->bounce.d.orig_iovs);
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PUSH_DATA);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
			break;
		case ACCEL_SEQUENCE_STATE_PUSH_DATA:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
			accel_task_push_data(seq, task);
			break;
		case ACCEL_SEQUENCE_STATE_NEXT_TASK:
			accel_sequence_complete_task(seq, task);
			/* Check if there are any remaining tasks */
			task = TAILQ_FIRST(&seq->tasks);
			if (task == NULL) {
				/* Immediately return here to make sure we don't touch the sequence
				 * after it's completed */
				accel_sequence_complete(seq);
				return;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_INIT);
			break;
		case ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS:
			assert(!TAILQ_EMPTY(&seq->tasks));

			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS);
			rc = g_accel_driver->execute_sequence(accel_ch->driver_channel, seq);
			if (spdk_unlikely(rc != 0)) {
				SPDK_ERRLOG("Failed to execute sequence: %p using driver: %s\n",
					    seq, g_accel_driver->name);
				accel_sequence_set_fail(seq, rc);
			}
			break;
		case ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS:
			/* Get the task again, as the driver might have completed some tasks
			 * synchronously */
			task = TAILQ_FIRST(&seq->tasks);
			if (task == NULL) {
				/* Immediately return here to make sure we don't touch the sequence
				 * after it's completed */
				accel_sequence_complete(seq);
				return;
			}
			/* We don't want to execute the next task through the driver, so we
			 * explicitly omit the INIT state here */
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
			break;
		case ACCEL_SEQUENCE_STATE_ERROR:
			/* Immediately return here to make sure we don't touch the sequence
			 * after it's completed */
			assert(seq->status != 0);
			accel_sequence_complete(seq);
			return;
		case ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF:
		case ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF:
		case ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA:
		case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
		case ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA:
		case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS:
			break;
		default:
			assert(0 && "bad state");
			break;
		}
	} while (seq->state != state);

	seq->in_process_sequence = false;
}

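/*
 * Informal summary of the state machine above for a single task on the
 * non-driver path (no new behavior, just the happy-path order of states):
 *
 *	INIT -> CHECK_VIRTBUF -> CHECK_BOUNCEBUF [-> PULL_DATA -> AWAIT_PULL_DATA]
 *	     -> EXEC_TASK -> AWAIT_TASK -> COMPLETE_TASK
 *	     [-> PUSH_DATA -> AWAIT_PUSH_DATA] -> NEXT_TASK -> INIT ...
 *	     until no tasks remain.
 *
 * The AWAIT_* states park the sequence while an allocation, data transfer or
 * module operation completes asynchronously; the matching callback re-enters
 * accel_process_sequence().
 */
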
static void
accel_sequence_task_cb(struct spdk_accel_sequence *seq, struct spdk_accel_task *task, int status)
{
	switch (seq->state) {
	case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_COMPLETE_TASK);
		if (spdk_unlikely(status != 0)) {
			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p\n",
				    g_opcode_strings[task->op_code], seq);
			accel_sequence_set_fail(seq, status);
		}

		accel_process_sequence(seq);
		break;
	case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS:
		assert(g_accel_driver != NULL);
		/* Immediately remove the task from the outstanding list to make sure the next call
		 * to spdk_accel_sequence_first_task() doesn't return it */
		accel_sequence_complete_task(seq, task);
		if (spdk_unlikely(status != 0)) {
			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p through "
				    "driver: %s\n", g_opcode_strings[task->op_code], seq,
				    g_accel_driver->name);
			/* Update status without using accel_sequence_set_fail() to avoid changing
			 * seq's state to ERROR until driver calls spdk_accel_sequence_continue() */
			seq->status = status;
		}
		break;
	default:
		assert(0 && "bad state");
		break;
	}
}

void
spdk_accel_sequence_continue(struct spdk_accel_sequence *seq)
{
	assert(g_accel_driver != NULL);
	assert(seq->state == ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS);

	if (spdk_likely(seq->status == 0)) {
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS);
	} else {
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
	}

	accel_process_sequence(seq);
}

static bool
accel_compare_iovs(struct iovec *iova, uint32_t iovacnt, struct iovec *iovb, uint32_t iovbcnt)
{
	/* For now, just do a dumb check that the iovecs arrays are exactly the same */
	if (iovacnt != iovbcnt) {
		return false;
	}

	return memcmp(iova, iovb, sizeof(*iova) * iovacnt) == 0;
}

static bool
accel_task_set_dstbuf(struct spdk_accel_task *task, struct spdk_accel_task *next)
{
	struct spdk_accel_task *prev;

	switch (task->op_code) {
	case SPDK_ACCEL_OPC_DECOMPRESS:
	case SPDK_ACCEL_OPC_FILL:
	case SPDK_ACCEL_OPC_ENCRYPT:
	case SPDK_ACCEL_OPC_DECRYPT:
	case SPDK_ACCEL_OPC_DIF_GENERATE_COPY:
	case SPDK_ACCEL_OPC_DIF_VERIFY_COPY:
		if (task->dst_domain != next->src_domain) {
			return false;
		}
		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
					next->s.iovs, next->s.iovcnt)) {
			return false;
		}
		task->d.iovs = next->d.iovs;
		task->d.iovcnt = next->d.iovcnt;
		task->dst_domain = next->dst_domain;
		task->dst_domain_ctx = next->dst_domain_ctx;
		break;
	case SPDK_ACCEL_OPC_CRC32C:
	case SPDK_ACCEL_OPC_DIX_GENERATE:
	case SPDK_ACCEL_OPC_DIX_VERIFY:
		/* crc32 and dix_generate/verify are special, because they do not have a dst buffer */
		if (task->src_domain != next->src_domain) {
			return false;
		}
		if (!accel_compare_iovs(task->s.iovs, task->s.iovcnt,
					next->s.iovs, next->s.iovcnt)) {
			return false;
		}
		/* We can only change operation's buffer if we can change previous task's buffer */
		prev = TAILQ_PREV(task, accel_sequence_tasks, seq_link);
		if (prev == NULL) {
			return false;
		}
		if (!accel_task_set_dstbuf(prev, next)) {
			return false;
		}
		task->s.iovs = next->d.iovs;
		task->s.iovcnt = next->d.iovcnt;
		task->src_domain = next->dst_domain;
		task->src_domain_ctx = next->dst_domain_ctx;
		break;
	default:
		return false;
	}

	return true;
}

static void
accel_sequence_merge_tasks(struct spdk_accel_sequence *seq, struct spdk_accel_task *task,
			   struct spdk_accel_task **next_task)
{
	struct spdk_accel_task *next = *next_task;

	switch (task->op_code) {
	case SPDK_ACCEL_OPC_COPY:
		/* We only allow changing src of operations that actually have a src, e.g. we never
		 * do it for fill.  Theoretically, it is possible, but we'd have to be careful to
		 * change the src of the operation after fill (which in turn could also be a fill).
		 * So, for the sake of simplicity, skip this type of operations for now.
		 */
		if (next->op_code != SPDK_ACCEL_OPC_DECOMPRESS &&
		    next->op_code != SPDK_ACCEL_OPC_COPY &&
		    next->op_code != SPDK_ACCEL_OPC_ENCRYPT &&
		    next->op_code != SPDK_ACCEL_OPC_DECRYPT &&
		    next->op_code != SPDK_ACCEL_OPC_COPY_CRC32C &&
		    next->op_code != SPDK_ACCEL_OPC_DIF_GENERATE_COPY &&
		    next->op_code != SPDK_ACCEL_OPC_DIF_VERIFY_COPY) {
			break;
		}
		if (task->dst_domain != next->src_domain) {
			break;
		}
		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
					next->s.iovs, next->s.iovcnt)) {
			break;
		}
		next->s.iovs = task->s.iovs;
		next->s.iovcnt = task->s.iovcnt;
		next->src_domain = task->src_domain;
		next->src_domain_ctx = task->src_domain_ctx;
		accel_sequence_complete_task(seq, task);
		break;
	case SPDK_ACCEL_OPC_DECOMPRESS:
	case SPDK_ACCEL_OPC_FILL:
	case SPDK_ACCEL_OPC_ENCRYPT:
	case SPDK_ACCEL_OPC_DECRYPT:
	case SPDK_ACCEL_OPC_CRC32C:
	case SPDK_ACCEL_OPC_DIF_GENERATE_COPY:
	case SPDK_ACCEL_OPC_DIF_VERIFY_COPY:
	case SPDK_ACCEL_OPC_DIX_GENERATE:
	case SPDK_ACCEL_OPC_DIX_VERIFY:
		/* We can only merge tasks when one of them is a copy */
		if (next->op_code != SPDK_ACCEL_OPC_COPY) {
			break;
		}
		if (!accel_task_set_dstbuf(task, next)) {
			break;
		}
		/* We're removing next_task from the tasks queue, so we need to update its pointer,
		 * so that the TAILQ_FOREACH_SAFE() loop below works correctly */
		*next_task = TAILQ_NEXT(next, seq_link);
		accel_sequence_complete_task(seq, next);
		break;
	default:
		assert(0 && "bad opcode");
		break;
	}
}

void
spdk_accel_sequence_finish(struct spdk_accel_sequence *seq,
			   spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct spdk_accel_task *task, *next;

	/* Try to remove any copy operations if possible */
	TAILQ_FOREACH_SAFE(task, &seq->tasks, seq_link, next) {
		if (next == NULL) {
			break;
		}
		accel_sequence_merge_tasks(seq, task, &next);
	}

	seq->cb_fn = cb_fn;
	seq->cb_arg = cb_arg;

	accel_process_sequence(seq);
}

void
spdk_accel_sequence_reverse(struct spdk_accel_sequence *seq)
{
	struct accel_sequence_tasks tasks = TAILQ_HEAD_INITIALIZER(tasks);
	struct spdk_accel_task *task;

	TAILQ_SWAP(&tasks, &seq->tasks, spdk_accel_task, seq_link);

	while (!TAILQ_EMPTY(&tasks)) {
		task = TAILQ_FIRST(&tasks);
		TAILQ_REMOVE(&tasks, task, seq_link);
		TAILQ_INSERT_HEAD(&seq->tasks, task, seq_link);
	}
}

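/*
 * Illustrative example of the copy elision performed at finish time (names
 * are hypothetical): a sequence of
 *
 *	decompress(src -> bounce); copy(bounce -> dst)
 *
 * is collapsed into decompress(src -> dst) by accel_sequence_merge_tasks(),
 * provided the copy's source iovecs exactly match the decompress'
 * destination iovecs and the memory domains agree.
 */
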
void
spdk_accel_sequence_abort(struct spdk_accel_sequence *seq)
{
	if (seq == NULL) {
		return;
	}

	accel_sequence_complete_tasks(seq);
	accel_sequence_put(seq);
}

struct spdk_memory_domain *
spdk_accel_get_memory_domain(void)
{
	return g_accel_domain;
}

static struct spdk_accel_module_if *
_module_find_by_name(const char *name)
{
	struct spdk_accel_module_if *accel_module = NULL;

	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		if (strcmp(name, accel_module->name) == 0) {
			break;
		}
	}

	return accel_module;
}

static inline struct spdk_accel_crypto_key *
_accel_crypto_key_get(const char *name)
{
	struct spdk_accel_crypto_key *key;

	assert(spdk_spin_held(&g_keyring_spin));

	TAILQ_FOREACH(key, &g_keyring, link) {
		if (strcmp(name, key->param.key_name) == 0) {
			return key;
		}
	}

	return NULL;
}

static void
accel_crypto_key_free_mem(struct spdk_accel_crypto_key *key)
{
	if (key->param.hex_key) {
		spdk_memset_s(key->param.hex_key, key->key_size * 2, 0, key->key_size * 2);
		free(key->param.hex_key);
	}
	if (key->param.hex_key2) {
		spdk_memset_s(key->param.hex_key2, key->key2_size * 2, 0, key->key2_size * 2);
		free(key->param.hex_key2);
	}
	free(key->param.tweak_mode);
	free(key->param.key_name);
	free(key->param.cipher);
	if (key->key) {
		spdk_memset_s(key->key, key->key_size, 0, key->key_size);
		free(key->key);
	}
	if (key->key2) {
		spdk_memset_s(key->key2, key->key2_size, 0, key->key2_size);
		free(key->key2);
	}
	free(key);
}

static void
accel_crypto_key_destroy_unsafe(struct spdk_accel_crypto_key *key)
{
	assert(key->module_if);
	assert(key->module_if->crypto_key_deinit);

	key->module_if->crypto_key_deinit(key);
	accel_crypto_key_free_mem(key);
}

/*
 * This function mitigates a timing side channel which could be caused by using strcmp()
 * Please refer to chapter "Mitigating Information Leakage Based on Variable Timing" in
 * the article [1] for more details
 * [1] https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/secure-coding/mitigate-timing-side-channel-crypto-implementation.html
 */
static bool
accel_aes_xts_keys_equal(const char *k1, size_t k1_len, const char *k2, size_t k2_len)
{
	size_t i;
	volatile size_t x = k1_len ^ k2_len;

	for (i = 0; ((i < k1_len) & (i < k2_len)); i++) {
		x |= k1[i] ^ k2[i];
	}

	return x == 0;
}

static const char *g_tweak_modes[] = {
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA] = "SIMPLE_LBA",
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_JOIN_NEG_LBA_WITH_LBA] = "JOIN_NEG_LBA_WITH_LBA",
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_INCR_512_FULL_LBA] = "INCR_512_FULL_LBA",
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_INCR_512_UPPER_LBA] = "INCR_512_UPPER_LBA",
};

static const char *g_ciphers[] = {
	[SPDK_ACCEL_CIPHER_AES_CBC] = "AES_CBC",
	[SPDK_ACCEL_CIPHER_AES_XTS] = "AES_XTS",
};

int
spdk_accel_crypto_key_create(const struct spdk_accel_crypto_key_create_param *param)
{
	struct spdk_accel_module_if *module;
	struct spdk_accel_crypto_key *key;
	size_t hex_key_size, hex_key2_size;
	bool found = false;
	size_t i;
	int rc;

	if (!param || !param->hex_key || !param->cipher || !param->key_name) {
		return -EINVAL;
	}

	if (g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module != g_modules_opc[SPDK_ACCEL_OPC_DECRYPT].module) {
		/* hardly ever possible, but let's check and warn the user */
		SPDK_ERRLOG("Different accel modules are used for encryption and decryption\n");
	}
	module = g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module;

	if (!module) {
		SPDK_ERRLOG("No accel module found assigned for crypto operation\n");
		return -ENOENT;
	}

	if (!module->crypto_key_init || !module->crypto_supports_cipher) {
		SPDK_ERRLOG("Module %s doesn't support crypto operations\n", module->name);
		return -ENOTSUP;
	}

	key = calloc(1, sizeof(*key));
	if (!key) {
		return -ENOMEM;
	}

	key->param.key_name = strdup(param->key_name);
	if (!key->param.key_name) {
		rc = -ENOMEM;
		goto error;
	}

	for (i = 0; i < SPDK_COUNTOF(g_ciphers); ++i) {
		assert(g_ciphers[i]);

		if (strncmp(param->cipher, g_ciphers[i], strlen(g_ciphers[i])) == 0) {
			key->cipher = i;
			found = true;
			break;
		}
	}

	if (!found) {
		SPDK_ERRLOG("Failed to parse cipher\n");
		rc = -EINVAL;
		goto error;
	}

	key->param.cipher = strdup(param->cipher);
	if (!key->param.cipher) {
		rc = -ENOMEM;
		goto error;
	}

	hex_key_size = strnlen(param->hex_key, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
	if (hex_key_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
		SPDK_ERRLOG("key1 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
		rc = -EINVAL;
		goto error;
	}

	if (hex_key_size == 0) {
		SPDK_ERRLOG("key1 size cannot be 0\n");
		rc = -EINVAL;
		goto error;
	}

	key->param.hex_key = strdup(param->hex_key);
	if (!key->param.hex_key) {
		rc = -ENOMEM;
		goto error;
	}

	key->key_size = hex_key_size / 2;
	key->key = spdk_unhexlify(key->param.hex_key);
	if (!key->key) {
		SPDK_ERRLOG("Failed to unhexlify key1\n");
		rc = -EINVAL;
		goto error;
	}

	if (param->hex_key2) {
		hex_key2_size = strnlen(param->hex_key2, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
		if (hex_key2_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
			SPDK_ERRLOG("key2 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
			rc = -EINVAL;
			goto error;
		}

		if (hex_key2_size == 0) {
			SPDK_ERRLOG("key2 size cannot be 0\n");
			rc = -EINVAL;
			goto error;
		}

		key->param.hex_key2 = strdup(param->hex_key2);
		if (!key->param.hex_key2) {
			rc = -ENOMEM;
			goto error;
		}

		key->key2_size = hex_key2_size / 2;
		key->key2 = spdk_unhexlify(key->param.hex_key2);
		if (!key->key2) {
			SPDK_ERRLOG("Failed to unhexlify key2\n");
			rc = -EINVAL;
			goto error;
		}
	}

	key->tweak_mode = ACCEL_CRYPTO_TWEAK_MODE_DEFAULT;
	if (param->tweak_mode) {
		found = false;

		key->param.tweak_mode = strdup(param->tweak_mode);
		if (!key->param.tweak_mode) {
			rc = -ENOMEM;
			goto error;
		}

		for (i = 0; i < SPDK_COUNTOF(g_tweak_modes); ++i) {
			assert(g_tweak_modes[i]);

			if (strncmp(param->tweak_mode, g_tweak_modes[i], strlen(g_tweak_modes[i])) == 0) {
				key->tweak_mode = i;
				found = true;
				break;
			}
		}

		if (!found) {
			SPDK_ERRLOG("Failed to parse tweak mode\n");
			rc = -EINVAL;
			goto error;
		}
	}

	if ((!module->crypto_supports_tweak_mode && key->tweak_mode != ACCEL_CRYPTO_TWEAK_MODE_DEFAULT) ||
	    (module->crypto_supports_tweak_mode && !module->crypto_supports_tweak_mode(key->tweak_mode))) {
		SPDK_ERRLOG("Module %s doesn't support %s tweak mode\n", module->name,
			    g_tweak_modes[key->tweak_mode]);
		rc = -EINVAL;
		goto error;
	}

	if (!module->crypto_supports_cipher(key->cipher, key->key_size)) {
		SPDK_ERRLOG("Module %s doesn't support %s cipher with %zu key size\n", module->name,
			    g_ciphers[key->cipher], key->key_size);
		rc = -EINVAL;
		goto error;
	}

	if (key->cipher == SPDK_ACCEL_CIPHER_AES_XTS) {
		if (!key->key2) {
			SPDK_ERRLOG("%s key2 is missing\n", g_ciphers[key->cipher]);
			rc = -EINVAL;
			goto error;
		}

		if (key->key_size != key->key2_size) {
			SPDK_ERRLOG("%s key size %zu is not equal to key2 size %zu\n", g_ciphers[key->cipher],
				    key->key_size, key->key2_size);
			rc = -EINVAL;
			goto error;
		}

		if (accel_aes_xts_keys_equal(key->key, key->key_size, key->key2, key->key2_size)) {
			SPDK_ERRLOG("%s identical keys are not secure\n", g_ciphers[key->cipher]);
			rc = -EINVAL;
			goto error;
		}
	}

	if (key->cipher == SPDK_ACCEL_CIPHER_AES_CBC) {
		if (key->key2_size) {
			SPDK_ERRLOG("%s doesn't use key2\n", g_ciphers[key->cipher]);
			rc = -EINVAL;
			goto error;
		}
	}

	key->module_if = module;

	spdk_spin_lock(&g_keyring_spin);
	if (_accel_crypto_key_get(param->key_name)) {
		rc = -EEXIST;
	} else {
		rc = module->crypto_key_init(key);
		if (rc) {
			SPDK_ERRLOG("Module %s failed to initialize crypto key\n", module->name);
		} else {
			TAILQ_INSERT_TAIL(&g_keyring, key, link);
		}
	}
	spdk_spin_unlock(&g_keyring_spin);

	if (rc) {
		goto error;
	}

	return 0;

error:
	accel_crypto_key_free_mem(key);
	return rc;
}

int
spdk_accel_crypto_key_destroy(struct spdk_accel_crypto_key *key)
{
	if (!key || !key->module_if) {
		return -EINVAL;
	}

	spdk_spin_lock(&g_keyring_spin);
	if (!_accel_crypto_key_get(key->param.key_name)) {
		spdk_spin_unlock(&g_keyring_spin);
		return -ENOENT;
	}
	TAILQ_REMOVE(&g_keyring, key, link);
	spdk_spin_unlock(&g_keyring_spin);

	accel_crypto_key_destroy_unsafe(key);

	return 0;
}

struct spdk_accel_crypto_key *
spdk_accel_crypto_key_get(const char *name)
{
	struct spdk_accel_crypto_key *key;

	spdk_spin_lock(&g_keyring_spin);
	key = _accel_crypto_key_get(name);
	spdk_spin_unlock(&g_keyring_spin);

	return key;
}

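/*
 * Illustrative sketch of creating an AES-XTS key (all values are hypothetical;
 * real keys must be randomly generated and kept secret):
 *
 *	struct spdk_accel_crypto_key_create_param param = {
 *		.key_name = "my_key",
 *		.cipher = "AES_XTS",
 *		.hex_key = "00112233445566778899aabbccddeeff",
 *		.hex_key2 = "ffeeddccbbaa99887766554433221100",
 *		.tweak_mode = "SIMPLE_LBA",
 *	};
 *
 *	if (spdk_accel_crypto_key_create(&param) == 0) {
 *		struct spdk_accel_crypto_key *key = spdk_accel_crypto_key_get("my_key");
 *		...
 *		spdk_accel_crypto_key_destroy(key);
 *	}
 */
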
/* Helper function called when accel modules register with the framework. */
void
spdk_accel_module_list_add(struct spdk_accel_module_if *accel_module)
{
	struct spdk_accel_module_if *tmp;

	if (_module_find_by_name(accel_module->name)) {
		SPDK_NOTICELOG("Module %s already registered\n", accel_module->name);
		assert(false);
		return;
	}

	TAILQ_FOREACH(tmp, &spdk_accel_module_list, tailq) {
		if (accel_module->priority < tmp->priority) {
			break;
		}
	}

	if (tmp != NULL) {
		TAILQ_INSERT_BEFORE(tmp, accel_module, tailq);
	} else {
		TAILQ_INSERT_TAIL(&spdk_accel_module_list, accel_module, tailq);
	}
}

/* Framework level channel create callback. */
static int
accel_create_channel(void *io_device, void *ctx_buf)
{
	struct accel_io_channel *accel_ch = ctx_buf;
	struct spdk_accel_task *accel_task;
	struct spdk_accel_task_aux_data *accel_task_aux;
	struct spdk_accel_sequence *seq;
	struct accel_buffer *buf;
	size_t task_size_aligned;
	uint8_t *task_mem;
	uint32_t i = 0, j;
	int rc;

	task_size_aligned = SPDK_ALIGN_CEIL(g_max_accel_module_size, SPDK_CACHE_LINE_SIZE);
	accel_ch->task_pool_base = aligned_alloc(SPDK_CACHE_LINE_SIZE,
						 g_opts.task_count * task_size_aligned);
	if (!accel_ch->task_pool_base) {
		return -ENOMEM;
	}
	memset(accel_ch->task_pool_base, 0, g_opts.task_count * task_size_aligned);

	accel_ch->seq_pool_base = aligned_alloc(SPDK_CACHE_LINE_SIZE,
						g_opts.sequence_count * sizeof(struct spdk_accel_sequence));
	if (accel_ch->seq_pool_base == NULL) {
		goto err;
	}
	memset(accel_ch->seq_pool_base, 0, g_opts.sequence_count * sizeof(struct spdk_accel_sequence));

	accel_ch->task_aux_data_base = calloc(g_opts.task_count, sizeof(struct spdk_accel_task_aux_data));
	if (accel_ch->task_aux_data_base == NULL) {
		goto err;
	}

	accel_ch->buf_pool_base = calloc(g_opts.buf_count, sizeof(struct accel_buffer));
	if (accel_ch->buf_pool_base == NULL) {
		goto err;
	}

	STAILQ_INIT(&accel_ch->task_pool);
	SLIST_INIT(&accel_ch->task_aux_data_pool);
	SLIST_INIT(&accel_ch->seq_pool);
	SLIST_INIT(&accel_ch->buf_pool);

	task_mem = accel_ch->task_pool_base;
	for (i = 0; i < g_opts.task_count; i++) {
		accel_task = (struct spdk_accel_task *)task_mem;
		accel_task->aux = NULL;
		STAILQ_INSERT_TAIL(&accel_ch->task_pool, accel_task, link);
		task_mem += task_size_aligned;
		accel_task_aux = &accel_ch->task_aux_data_base[i];
		SLIST_INSERT_HEAD(&accel_ch->task_aux_data_pool, accel_task_aux, link);
	}
	for (i = 0; i < g_opts.sequence_count; i++) {
		seq = &accel_ch->seq_pool_base[i];
		SLIST_INSERT_HEAD(&accel_ch->seq_pool, seq, link);
	}
	for (i = 0; i < g_opts.buf_count; i++) {
		buf = &accel_ch->buf_pool_base[i];
		SLIST_INSERT_HEAD(&accel_ch->buf_pool, buf, link);
	}

	/* Assign modules and get IO channels for each */
	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
		accel_ch->module_ch[i] = g_modules_opc[i].module->get_io_channel();
		/* This can happen if idxd runs out of channels. */
		if (accel_ch->module_ch[i] == NULL) {
			SPDK_ERRLOG("Module %s failed to get io channel\n", g_modules_opc[i].module->name);
			goto err;
		}
	}

	if (g_accel_driver != NULL) {
		accel_ch->driver_channel = g_accel_driver->get_io_channel();
		if (accel_ch->driver_channel == NULL) {
			SPDK_ERRLOG("Failed to get driver's IO channel\n");
			goto err;
		}
	}

	rc = spdk_iobuf_channel_init(&accel_ch->iobuf, "accel", g_opts.small_cache_size,
				     g_opts.large_cache_size);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to initialize iobuf accel channel\n");
		goto err;
	}

	return 0;
err:
	if (accel_ch->driver_channel != NULL) {
		spdk_put_io_channel(accel_ch->driver_channel);
	}
	for (j = 0; j < i; j++) {
		spdk_put_io_channel(accel_ch->module_ch[j]);
	}
	free(accel_ch->task_pool_base);
	free(accel_ch->task_aux_data_base);
	free(accel_ch->seq_pool_base);
	free(accel_ch->buf_pool_base);

	return -ENOMEM;
}

static void
accel_add_stats(struct accel_stats *total, struct accel_stats *stats)
{
	int i;

	total->sequence_executed += stats->sequence_executed;
	total->sequence_failed += stats->sequence_failed;
	total->sequence_outstanding += stats->sequence_outstanding;
	total->task_outstanding += stats->task_outstanding;
	total->retry.task += stats->retry.task;
	total->retry.sequence += stats->retry.sequence;
	total->retry.iobuf += stats->retry.iobuf;
	total->retry.bufdesc += stats->retry.bufdesc;
	for (i = 0; i < SPDK_ACCEL_OPC_LAST; ++i) {
		total->operations[i].executed += stats->operations[i].executed;
		total->operations[i].failed += stats->operations[i].failed;
		total->operations[i].num_bytes += stats->operations[i].num_bytes;
	}
}

/* Framework level channel destroy callback. */
static void
accel_destroy_channel(void *io_device, void *ctx_buf)
{
	struct accel_io_channel *accel_ch = ctx_buf;
	int i;

	spdk_iobuf_channel_fini(&accel_ch->iobuf);

	if (accel_ch->driver_channel != NULL) {
		spdk_put_io_channel(accel_ch->driver_channel);
	}

	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
		assert(accel_ch->module_ch[i] != NULL);
		spdk_put_io_channel(accel_ch->module_ch[i]);
		accel_ch->module_ch[i] = NULL;
	}

	/* Update global stats to make sure channel's stats aren't lost after a channel is gone */
	spdk_spin_lock(&g_stats_lock);
	accel_add_stats(&g_stats, &accel_ch->stats);
	spdk_spin_unlock(&g_stats_lock);

	free(accel_ch->task_pool_base);
	free(accel_ch->task_aux_data_base);
	free(accel_ch->seq_pool_base);
	free(accel_ch->buf_pool_base);
}

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(&spdk_accel_module_list);
}

static int
accel_module_initialize(void)
{
	struct spdk_accel_module_if *accel_module, *tmp_module;
	int rc = 0, module_rc;

	TAILQ_FOREACH_SAFE(accel_module, &spdk_accel_module_list, tailq, tmp_module) {
		module_rc = accel_module->module_init();
		if (module_rc) {
			TAILQ_REMOVE(&spdk_accel_module_list, accel_module, tailq);
			if (module_rc == -ENODEV) {
				SPDK_NOTICELOG("No devices for module %s, skipping\n", accel_module->name);
			} else if (!rc) {
				SPDK_ERRLOG("Module %s initialization failed with %d\n",
					    accel_module->name, module_rc);
				rc = module_rc;
			}
			continue;
		}

		SPDK_DEBUGLOG(accel, "Module %s initialized.\n", accel_module->name);
	}

	return rc;
}

static void
accel_module_init_opcode(enum spdk_accel_opcode opcode)
{
	struct accel_module *module = &g_modules_opc[opcode];
	struct spdk_accel_module_if *module_if = module->module;

	if (module_if->get_memory_domains != NULL) {
		module->supports_memory_domains = module_if->get_memory_domains(NULL, 0) > 0;
	}
}

static int
accel_memory_domain_translate(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			      struct spdk_memory_domain *dst_domain,
			      struct spdk_memory_domain_translation_ctx *dst_domain_ctx,
			      void *addr, size_t len, struct spdk_memory_domain_translation_result *result)
{
	struct accel_buffer *buf = src_domain_ctx;

	SPDK_DEBUGLOG(accel, "translate addr %p, len %zu\n", addr, len);

	assert(g_accel_domain == src_domain);
	assert(spdk_memory_domain_get_system_domain() == dst_domain);
	assert(buf->buf == NULL);
	assert(addr == ACCEL_BUFFER_BASE);
	assert(len == buf->len);

	buf->buf = spdk_iobuf_get(&buf->ch->iobuf, buf->len, NULL, NULL);
	if (spdk_unlikely(buf->buf == NULL)) {
		return -ENOMEM;
	}

	result->iov_count = 1;
	result->iov.iov_base = buf->buf;
	result->iov.iov_len = buf->len;
	SPDK_DEBUGLOG(accel, "translated addr %p\n", result->iov.iov_base);
	return 0;
}

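/*
 * Note on the translate/invalidate pair around here (a restatement, no new
 * behavior): translate backs the placeholder ACCEL_BUFFER_BASE address with a
 * real iobuf allocation on first use, while invalidate below returns that
 * iobuf once the translated iovec is no longer referenced, so one accel
 * buffer can be recycled across requests.
 */
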
static void
accel_memory_domain_invalidate(struct spdk_memory_domain *domain, void *domain_ctx,
			       struct iovec *iov, uint32_t iovcnt)
{
	struct accel_buffer *buf = domain_ctx;

	SPDK_DEBUGLOG(accel, "invalidate addr %p, len %zu\n", iov[0].iov_base, iov[0].iov_len);

	assert(g_accel_domain == domain);
	assert(iovcnt == 1);
	assert(buf->buf != NULL);
	assert(iov[0].iov_base == buf->buf);
	assert(iov[0].iov_len == buf->len);

	spdk_iobuf_put(&buf->ch->iobuf, buf->buf, buf->len);
	buf->buf = NULL;
}

int
spdk_accel_initialize(void)
{
	enum spdk_accel_opcode op;
	struct spdk_accel_module_if *accel_module = NULL;
	int rc;

	/*
	 * We need a unique identifier for the accel framework, so use the
	 * spdk_accel_module_list address for this purpose.
	 */
	spdk_io_device_register(&spdk_accel_module_list, accel_create_channel, accel_destroy_channel,
				sizeof(struct accel_io_channel), "accel");

	spdk_spin_init(&g_keyring_spin);
	spdk_spin_init(&g_stats_lock);

	rc = spdk_memory_domain_create(&g_accel_domain, SPDK_DMA_DEVICE_TYPE_ACCEL, NULL,
				       "SPDK_ACCEL_DMA_DEVICE");
	if (rc != 0) {
		SPDK_ERRLOG("Failed to create accel memory domain\n");
		return rc;
	}

	spdk_memory_domain_set_translation(g_accel_domain, accel_memory_domain_translate);
	spdk_memory_domain_set_invalidate(g_accel_domain, accel_memory_domain_invalidate);

	g_modules_started = true;
	rc = accel_module_initialize();
	if (rc) {
		return rc;
	}

	if (g_accel_driver != NULL && g_accel_driver->init != NULL) {
		rc = g_accel_driver->init();
		if (rc != 0) {
			SPDK_ERRLOG("Failed to initialize driver %s: %s\n", g_accel_driver->name,
				    spdk_strerror(-rc));
			return rc;
		}
	}

	/* The module list is ordered by priority, with the highest priority modules being at the
	 * end of the list.  The software module should be somewhere at the beginning of the list,
	 * before all HW modules.
	 * NOTE: all opcodes must be supported by software in the event that no HW modules are
	 * initialized to support the operation.
	 */
	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
			if (accel_module->supports_opcode(op)) {
				g_modules_opc[op].module = accel_module;
				SPDK_DEBUGLOG(accel, "OPC 0x%x now assigned to %s\n", op, accel_module->name);
			}
		}

		if (accel_module->get_ctx_size != NULL) {
			g_max_accel_module_size = spdk_max(g_max_accel_module_size,
							   accel_module->get_ctx_size());
		}
	}

	/* Now let's check for overrides and apply all that exist */
	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
		if (g_modules_opc_override[op] != NULL) {
			accel_module = _module_find_by_name(g_modules_opc_override[op]);
			if (accel_module == NULL) {
				SPDK_ERRLOG("Invalid module name of %s\n", g_modules_opc_override[op]);
				return -EINVAL;
			}
			if (accel_module->supports_opcode(op) == false) {
				SPDK_ERRLOG("Module %s does not support op code %d\n", accel_module->name, op);
				return -EINVAL;
			}
			g_modules_opc[op].module = accel_module;
		}
	}

	if (g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module != g_modules_opc[SPDK_ACCEL_OPC_DECRYPT].module) {
		SPDK_ERRLOG("Different accel modules are assigned to encrypt and decrypt operations\n");
		return -EINVAL;
	}
	if (g_modules_opc[SPDK_ACCEL_OPC_COMPRESS].module !=
	    g_modules_opc[SPDK_ACCEL_OPC_DECOMPRESS].module) {
		SPDK_ERRLOG("Different accel modules are assigned to compress and decompress operations\n");
		return -EINVAL;
	}

	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
		assert(g_modules_opc[op].module != NULL);
		accel_module_init_opcode(op);
	}

	rc = spdk_iobuf_register_module("accel");
	if (rc != 0) {
		SPDK_ERRLOG("Failed to register accel iobuf module\n");
		return rc;
	}

	return 0;
}

static void
accel_module_finish_cb(void)
{
	spdk_accel_fini_cb cb_fn = g_fini_cb_fn;

	cb_fn(g_fini_cb_arg);
	g_fini_cb_fn = NULL;
	g_fini_cb_arg = NULL;
}

static void
accel_write_overridden_opc(struct spdk_json_write_ctx *w, const char *opc_str,
			   const char *module_str)
{
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "accel_assign_opc");
	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_string(w, "opname", opc_str);
	spdk_json_write_named_string(w, "module", module_str);
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);
}

static void
__accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
{
	spdk_json_write_named_string(w, "name", key->param.key_name);
	spdk_json_write_named_string(w, "cipher", key->param.cipher);
	spdk_json_write_named_string(w, "key", key->param.hex_key);
	if (key->param.hex_key2) {
		spdk_json_write_named_string(w, "key2", key->param.hex_key2);
	}

	if (key->param.tweak_mode) {
		spdk_json_write_named_string(w, "tweak_mode", key->param.tweak_mode);
	}
}

void
_accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
{
	spdk_json_write_object_begin(w);
	__accel_crypto_key_dump_param(w, key);
	spdk_json_write_object_end(w);
}

static void
_accel_crypto_key_write_config_json(struct spdk_json_write_ctx *w,
				    struct spdk_accel_crypto_key *key)
{
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "accel_crypto_key_create");
	spdk_json_write_named_object_begin(w, "params");
	__accel_crypto_key_dump_param(w, key);
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);
}

static void
accel_write_options(struct spdk_json_write_ctx *w)
{
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "accel_set_options");
	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_uint32(w, "small_cache_size", g_opts.small_cache_size);
	spdk_json_write_named_uint32(w, "large_cache_size", g_opts.large_cache_size);
	spdk_json_write_named_uint32(w, "task_count", g_opts.task_count);
	spdk_json_write_named_uint32(w, "sequence_count", g_opts.sequence_count);
	spdk_json_write_named_uint32(w, "buf_count", g_opts.buf_count);
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);
}

static void
_accel_crypto_keys_write_config_json(struct spdk_json_write_ctx *w, bool full_dump)
{
	struct spdk_accel_crypto_key *key;

	spdk_spin_lock(&g_keyring_spin);
	TAILQ_FOREACH(key, &g_keyring, link) {
		if (full_dump) {
			_accel_crypto_key_write_config_json(w, key);
		} else {
			_accel_crypto_key_dump_param(w, key);
		}
	}
	spdk_spin_unlock(&g_keyring_spin);
}

void
_accel_crypto_keys_dump_param(struct spdk_json_write_ctx *w)
{
	_accel_crypto_keys_write_config_json(w, false);
}

void
spdk_accel_write_config_json(struct spdk_json_write_ctx *w)
{
	struct spdk_accel_module_if *accel_module;
	int i;

	spdk_json_write_array_begin(w);
	accel_write_options(w);

	if (g_accel_driver != NULL) {
		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "method", "accel_set_driver");
		spdk_json_write_named_object_begin(w, "params");
		spdk_json_write_named_string(w, "name", g_accel_driver->name);
		spdk_json_write_object_end(w);
		spdk_json_write_object_end(w);
	}

	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		if (accel_module->write_config_json) {
			accel_module->write_config_json(w);
		}
	}
	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
		if (g_modules_opc_override[i]) {
			accel_write_overridden_opc(w, g_opcode_strings[i], g_modules_opc_override[i]);
		}
	}

	_accel_crypto_keys_write_config_json(w, true);

	spdk_json_write_array_end(w);
}

void
spdk_accel_module_finish(void)
{
	if (!g_accel_module) {
		g_accel_module = TAILQ_FIRST(&spdk_accel_module_list);
	} else {
		g_accel_module = TAILQ_NEXT(g_accel_module, tailq);
	}

	if (!g_accel_module) {
		if (g_accel_driver != NULL && g_accel_driver->fini != NULL) {
			g_accel_driver->fini();
		}

		spdk_spin_destroy(&g_keyring_spin);
		spdk_spin_destroy(&g_stats_lock);
		if (g_accel_domain) {
			spdk_memory_domain_destroy(g_accel_domain);
			g_accel_domain = NULL;
		}
		accel_module_finish_cb();
		return;
	}

	if (g_accel_module->module_fini) {
		spdk_thread_send_msg(spdk_get_thread(), g_accel_module->module_fini, NULL);
	} else {
		spdk_accel_module_finish();
	}
}

static void
accel_io_device_unregister_cb(void *io_device)
{
	struct spdk_accel_crypto_key *key, *key_tmp;
	enum spdk_accel_opcode op;

	spdk_spin_lock(&g_keyring_spin);
	TAILQ_FOREACH_SAFE(key, &g_keyring, link, key_tmp) {
		accel_crypto_key_destroy_unsafe(key);
	}
	spdk_spin_unlock(&g_keyring_spin);

	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
		if (g_modules_opc_override[op] != NULL) {
			free(g_modules_opc_override[op]);
			g_modules_opc_override[op] = NULL;
		}
		g_modules_opc[op].module = NULL;
	}

	spdk_accel_module_finish();
}

void
spdk_accel_finish(spdk_accel_fini_cb cb_fn, void *cb_arg)
{
	assert(cb_fn != NULL);

	g_fini_cb_fn = cb_fn;
	g_fini_cb_arg = cb_arg;

	spdk_io_device_unregister(&spdk_accel_module_list, accel_io_device_unregister_cb);
}

static struct spdk_accel_driver *
accel_find_driver(const char *name)
{
	struct spdk_accel_driver *driver;

	TAILQ_FOREACH(driver, &g_accel_drivers, tailq) {
		if (strcmp(driver->name, name) == 0) {
			return driver;
		}
	}

	return NULL;
}

int
spdk_accel_set_driver(const char *name)
{
	struct spdk_accel_driver *driver = NULL;

	if (name != NULL && name[0] != '\0') {
		driver = accel_find_driver(name);
		if (driver == NULL) {
			SPDK_ERRLOG("Couldn't find driver named '%s'\n", name);
			return -ENODEV;
		}
	}

	g_accel_driver = driver;

	return 0;
}

const char *
spdk_accel_get_driver_name(void)
{
	if (!g_accel_driver) {
		return NULL;
	}

	return g_accel_driver->name;
}

void
spdk_accel_driver_register(struct spdk_accel_driver *driver)
{
	if (accel_find_driver(driver->name)) {
		SPDK_ERRLOG("Driver named '%s' has already been registered\n", driver->name);
		assert(0);
		return;
	}

	TAILQ_INSERT_TAIL(&g_accel_drivers, driver, tailq);
}

int
spdk_accel_set_opts(const struct spdk_accel_opts *opts)
{
	if (!opts) {
		SPDK_ERRLOG("opts cannot be NULL\n");
		return -1;
	}

	if (!opts->opts_size) {
		SPDK_ERRLOG("opts_size inside opts cannot be zero value\n");
		return -1;
	}

	if (SPDK_GET_FIELD(opts, task_count, g_opts.task_count,
			   opts->opts_size) < ACCEL_TASKS_IN_SEQUENCE_LIMIT) {
		return -EINVAL;
	}

#define SET_FIELD(field) \
	if (offsetof(struct spdk_accel_opts, field) + sizeof(opts->field) <= opts->opts_size) { \
		g_opts.field = opts->field; \
	} \

	SET_FIELD(small_cache_size);
	SET_FIELD(large_cache_size);
	SET_FIELD(task_count);
	SET_FIELD(sequence_count);
	SET_FIELD(buf_count);

	g_opts.opts_size = opts->opts_size;

#undef SET_FIELD

	return 0;
}

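/*
 * Illustrative sketch of the versioned-options pattern used by
 * spdk_accel_set_opts()/spdk_accel_get_opts(): opts_size lets old and new
 * callers interoperate, since only fields that fit within the caller's
 * declared size are copied.
 *
 *	struct spdk_accel_opts opts = {};
 *
 *	spdk_accel_get_opts(&opts, sizeof(opts));
 *	opts.task_count = 4096;
 *	if (spdk_accel_set_opts(&opts) != 0) {
 *		... handle invalid options ...
 *	}
 */
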

void
spdk_accel_get_opts(struct spdk_accel_opts *opts, size_t opts_size)
{
	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return;
	}

	if (!opts_size) {
		SPDK_ERRLOG("opts_size should not be zero\n");
		return;
	}

	opts->opts_size = opts_size;

#define SET_FIELD(field) \
	if (offsetof(struct spdk_accel_opts, field) + sizeof(opts->field) <= opts_size) { \
		opts->field = g_opts.field; \
	} \

	SET_FIELD(small_cache_size);
	SET_FIELD(large_cache_size);
	SET_FIELD(task_count);
	SET_FIELD(sequence_count);
	SET_FIELD(buf_count);

#undef SET_FIELD

	/* Do not remove this assert. When adding a new field to spdk_accel_opts,
	 * update the expected size here and add a SET_FIELD() for the new field,
	 * both here and in spdk_accel_set_opts(). */
	SPDK_STATIC_ASSERT(sizeof(struct spdk_accel_opts) == 28, "Incorrect size");
}

struct accel_get_stats_ctx {
	struct accel_stats stats;
	accel_get_stats_cb cb_fn;
	void *cb_arg;
};

static void
accel_get_channel_stats_done(struct spdk_io_channel_iter *iter, int status)
{
	struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);

	ctx->cb_fn(&ctx->stats, ctx->cb_arg);
	free(ctx);
}

static void
accel_get_channel_stats(struct spdk_io_channel_iter *iter)
{
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(iter);
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);

	accel_add_stats(&ctx->stats, &accel_ch->stats);
	spdk_for_each_channel_continue(iter, 0);
}

int
accel_get_stats(accel_get_stats_cb cb_fn, void *cb_arg)
{
	struct accel_get_stats_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		return -ENOMEM;
	}

	spdk_spin_lock(&g_stats_lock);
	accel_add_stats(&ctx->stats, &g_stats);
	spdk_spin_unlock(&g_stats_lock);

	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	spdk_for_each_channel(&spdk_accel_module_list, accel_get_channel_stats, ctx,
			      accel_get_channel_stats_done);

	return 0;
}

void
spdk_accel_get_opcode_stats(struct spdk_io_channel *ch, enum spdk_accel_opcode opcode,
			    struct spdk_accel_opcode_stats *stats, size_t size)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);

#define FIELD_OK(field) \
	offsetof(struct spdk_accel_opcode_stats, field) + sizeof(stats->field) <= size

#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		stats->field = value; \
	}

	SET_FIELD(executed, accel_ch->stats.operations[opcode].executed);
	SET_FIELD(failed, accel_ch->stats.operations[opcode].failed);
	SET_FIELD(num_bytes, accel_ch->stats.operations[opcode].num_bytes);

#undef FIELD_OK
#undef SET_FIELD
}
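
/* Example (illustrative sketch): reading the per-channel counters for one
 * opcode. The FIELD_OK()/SET_FIELD() machinery above copies only the fields
 * that fit in the caller-provided size, so passing sizeof(stats) stays safe
 * even if the struct grows in a future version. "ch" is assumed to be an
 * accel channel obtained from spdk_accel_get_io_channel():
 *
 *	struct spdk_accel_opcode_stats stats = {};
 *
 *	spdk_accel_get_opcode_stats(ch, SPDK_ACCEL_OPC_CRC32C, &stats, sizeof(stats));
 *	printf("crc32c: executed=%ju, failed=%ju, bytes=%ju\n",
 *	       (uintmax_t)stats.executed, (uintmax_t)stats.failed,
 *	       (uintmax_t)stats.num_bytes);
 */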

uint8_t
spdk_accel_get_buf_align(enum spdk_accel_opcode opcode,
			 const struct spdk_accel_operation_exec_ctx *ctx)
{
	struct spdk_accel_module_if *module = g_modules_opc[opcode].module;
	struct spdk_accel_opcode_info modinfo = {}, drvinfo = {};

	if (g_accel_driver != NULL && g_accel_driver->get_operation_info != NULL) {
		g_accel_driver->get_operation_info(opcode, ctx, &drvinfo);
	}

	if (module->get_operation_info != NULL) {
		module->get_operation_info(opcode, ctx, &modinfo);
	}

	/* If a driver is set, it'll execute most of the operations, while the rest will usually
	 * fall back to accel_sw, which doesn't have any alignment requirements. However, to be
	 * extra safe, return the max(driver, module) if a driver delegates some operations to a
	 * hardware module.
	 */
	return spdk_max(modinfo.required_alignment, drvinfo.required_alignment);
}

struct spdk_accel_module_if *
spdk_accel_get_module(const char *name)
{
	struct spdk_accel_module_if *module;

	TAILQ_FOREACH(module, &spdk_accel_module_list, tailq) {
		if (strcmp(module->name, name) == 0) {
			return module;
		}
	}

	return NULL;
}

int
spdk_accel_get_opc_memory_domains(enum spdk_accel_opcode opcode,
				  struct spdk_memory_domain **domains,
				  int array_size)
{
	assert(opcode < SPDK_ACCEL_OPC_LAST);

	if (g_modules_opc[opcode].module->get_memory_domains) {
		return g_modules_opc[opcode].module->get_memory_domains(domains, array_size);
	}

	return 0;
}

SPDK_LOG_REGISTER_COMPONENT(accel)
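
/* Example (illustrative sketch): listing the memory domains supported by the
 * module registered for an opcode, via spdk_accel_get_opc_memory_domains()
 * above. The return value is whatever the module's get_memory_domains()
 * callback reports (the number of domains, per the module interface), or 0
 * when the module has no such callback; the array size of 8 is an arbitrary
 * choice for the sketch:
 *
 *	struct spdk_memory_domain *domains[8];
 *	int rc;
 *
 *	rc = spdk_accel_get_opc_memory_domains(SPDK_ACCEL_OPC_COPY, domains,
 *					       SPDK_COUNTOF(domains));
 *	if (rc > 0) {
 *		printf("copy: module reported %d memory domain(s)\n", rc);
 *	}
 */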