/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Intel Corporation.
 * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES.
 * All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk_internal/accel_module.h"

#include "accel_internal.h"

#include "spdk/env.h"
#include "spdk/likely.h"
#include "spdk/log.h"
#include "spdk/thread.h"
#include "spdk/json.h"
#include "spdk/crc32.h"
#include "spdk/util.h"

/* Accelerator Framework: The following provides a top level
 * generic API for the accelerator functions defined here. Modules,
 * such as the one in /module/accel/ioat, supply the implementation
 * with the exception of the pure software implementation contained
 * later in this file.
 */

#define ALIGN_4K		0x1000
#define MAX_TASKS_PER_CHANNEL	0x800

/* Largest context size for all accel modules */
static size_t g_max_accel_module_size = sizeof(struct spdk_accel_task);

static struct spdk_accel_module_if *g_accel_module = NULL;
static spdk_accel_fini_cb g_fini_cb_fn = NULL;
static void *g_fini_cb_arg = NULL;
static bool g_modules_started = false;

/* Global list of registered accelerator modules */
static TAILQ_HEAD(, spdk_accel_module_if) spdk_accel_module_list =
	TAILQ_HEAD_INITIALIZER(spdk_accel_module_list);

/* Global array mapping capabilities to modules */
static struct spdk_accel_module_if *g_modules_opc[ACCEL_OPC_LAST] = {};
static char *g_modules_opc_override[ACCEL_OPC_LAST] = {};

static const char *g_opcode_strings[ACCEL_OPC_LAST] = {
	"copy", "fill", "dualcast", "compare", "crc32c", "copy_crc32c",
	"compress", "decompress"
};

struct accel_io_channel {
	struct spdk_io_channel		*module_ch[ACCEL_OPC_LAST];
	void				*task_pool_base;
	TAILQ_HEAD(, spdk_accel_task)	task_pool;
};

int
spdk_accel_get_opc_module_name(enum accel_opcode opcode, const char **module_name)
{
	if (opcode >= ACCEL_OPC_LAST) {
		/* invalid opcode */
		return -EINVAL;
	}

	if (g_modules_opc[opcode]) {
		*module_name = g_modules_opc[opcode]->name;
	} else {
		return -ENOENT;
	}

	return 0;
}

void
_accel_for_each_module(struct module_info *info, _accel_for_each_module_fn fn)
{
	struct spdk_accel_module_if *accel_module;
	enum accel_opcode opcode;
	int j = 0;

	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		for (opcode = 0; opcode < ACCEL_OPC_LAST; opcode++) {
			if (accel_module->supports_opcode(opcode)) {
				info->ops[j] = opcode;
				j++;
			}
		}
		info->name = accel_module->name;
		info->num_ops = j;
		fn(info);
		j = 0;
	}
}

int
_accel_get_opc_name(enum accel_opcode opcode, const char **opcode_name)
{
	int rc = 0;

	if (opcode < ACCEL_OPC_LAST) {
		*opcode_name = g_opcode_strings[opcode];
	} else {
		/* invalid opcode */
		rc = -EINVAL;
	}

	return rc;
}

int
spdk_accel_assign_opc(enum accel_opcode opcode, const char *name)
{
	if (g_modules_started == true) {
		/* we don't allow re-assignment once things have started */
		return -EINVAL;
	}

	if (opcode >= ACCEL_OPC_LAST) {
		/* invalid opcode */
		return -EINVAL;
	}

	/* module selection will be validated after the framework starts. */
	g_modules_opc_override[opcode] = strdup(name);

	return 0;
}
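
/*
 * Illustrative sketch (not built as part of this file): a caller can pin an
 * opcode to a specific module before the framework starts, e.g. to force
 * CRC-32C onto the software path. The module name "software" is taken from
 * the special-casing in spdk_accel_module_list_add() below; any other name
 * used here is only an example.
 *
 *	int rc = spdk_accel_assign_opc(ACCEL_OPC_CRC32C, "software");
 *	if (rc != 0) {
 *		// called after spdk_accel_initialize(), or invalid opcode
 *	}
 */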

void
spdk_accel_task_complete(struct spdk_accel_task *accel_task, int status)
{
	struct accel_io_channel *accel_ch = accel_task->accel_ch;
	spdk_accel_completion_cb cb_fn = accel_task->cb_fn;
	void *cb_arg = accel_task->cb_arg;

	/* Return the accel_task to the pool before invoking the callback, so
	 * that the task list cannot be exhausted if the user's callback
	 * (cb_fn) recursively allocates another accel_task.
	 */
	TAILQ_INSERT_HEAD(&accel_ch->task_pool, accel_task, link);

	cb_fn(cb_arg, status);
}

inline static struct spdk_accel_task *
_get_task(struct accel_io_channel *accel_ch, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct spdk_accel_task *accel_task;

	accel_task = TAILQ_FIRST(&accel_ch->task_pool);
	if (accel_task == NULL) {
		return NULL;
	}

	TAILQ_REMOVE(&accel_ch->task_pool, accel_task, link);
	accel_task->link.tqe_next = NULL;
	accel_task->link.tqe_prev = NULL;

	accel_task->cb_fn = cb_fn;
	accel_task->cb_arg = cb_arg;
	accel_task->accel_ch = accel_ch;

	return accel_task;
}
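
/*
 * Illustrative sketch of the caller side of the submit API below. The names
 * "my_copy_done" and "my_ctx" are hypothetical, and flags = 0 is assumed to
 * be acceptable when no special behavior is requested. The channel comes
 * from spdk_accel_get_io_channel() (defined later in this file) and must
 * eventually be released with spdk_put_io_channel(). status is whatever the
 * module passed to spdk_accel_task_complete(), 0 on success by convention.
 *
 *	static void
 *	my_copy_done(void *cb_arg, int status)
 *	{
 *		// handle completion; status != 0 indicates failure
 *	}
 *
 *	struct spdk_io_channel *ch = spdk_accel_get_io_channel();
 *	int rc = spdk_accel_submit_copy(ch, dst, src, nbytes, 0, my_copy_done, my_ctx);
 *	if (rc == -ENOMEM) {
 *		// per-channel task pool exhausted; retry later
 *	}
 */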

/* Accel framework public API for copy function */
int
spdk_accel_submit_copy(struct spdk_io_channel *ch, void *dst, void *src,
		       uint64_t nbytes, int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_COPY];
	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_COPY];

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->dst = dst;
	accel_task->src = src;
	accel_task->op_code = ACCEL_OPC_COPY;
	accel_task->nbytes = nbytes;
	accel_task->flags = flags;

	return module->submit_tasks(module_ch, accel_task);
}

/* Accel framework public API for dual cast copy function */
int
spdk_accel_submit_dualcast(struct spdk_io_channel *ch, void *dst1,
			   void *dst2, void *src, uint64_t nbytes, int flags,
			   spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_DUALCAST];
	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_DUALCAST];

	if ((uintptr_t)dst1 & (ALIGN_4K - 1) || (uintptr_t)dst2 & (ALIGN_4K - 1)) {
		SPDK_ERRLOG("Dualcast requires 4K alignment on dst addresses\n");
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->src = src;
	accel_task->dst = dst1;
	accel_task->dst2 = dst2;
	accel_task->nbytes = nbytes;
	accel_task->flags = flags;
	accel_task->op_code = ACCEL_OPC_DUALCAST;

	return module->submit_tasks(module_ch, accel_task);
}

/* Accel framework public API for compare function */
int
spdk_accel_submit_compare(struct spdk_io_channel *ch, void *src1,
			  void *src2, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
			  void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_COMPARE];
	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_COMPARE];

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->src = src1;
	accel_task->src2 = src2;
	accel_task->nbytes = nbytes;
	accel_task->op_code = ACCEL_OPC_COMPARE;

	return module->submit_tasks(module_ch, accel_task);
}

/* Accel framework public API for fill function */
int
spdk_accel_submit_fill(struct spdk_io_channel *ch, void *dst,
		       uint8_t fill, uint64_t nbytes, int flags,
		       spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_FILL];
	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_FILL];

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->dst = dst;
	memset(&accel_task->fill_pattern, fill, sizeof(uint64_t));
	accel_task->nbytes = nbytes;
	accel_task->flags = flags;
	accel_task->op_code = ACCEL_OPC_FILL;

	return module->submit_tasks(module_ch, accel_task);
}

/* Accel framework public API for CRC-32C function */
int
spdk_accel_submit_crc32c(struct spdk_io_channel *ch, uint32_t *crc_dst,
			 void *src, uint32_t seed, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
			 void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_CRC32C];
	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_CRC32C];

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->crc_dst = crc_dst;
	accel_task->src = src;
	accel_task->s.iovcnt = 0;
	accel_task->seed = seed;
	accel_task->nbytes = nbytes;
	accel_task->op_code = ACCEL_OPC_CRC32C;

	return module->submit_tasks(module_ch, accel_task);
}

/* Accel framework public API for chained CRC-32C function */
int
spdk_accel_submit_crc32cv(struct spdk_io_channel *ch, uint32_t *crc_dst,
			  struct iovec *iov, uint32_t iov_cnt, uint32_t seed,
			  spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_CRC32C];
	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_CRC32C];

	if (iov == NULL) {
		SPDK_ERRLOG("iov should not be NULL\n");
		return -EINVAL;
	}

	if (!iov_cnt) {
		SPDK_ERRLOG("iovcnt should not be zero\n");
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		SPDK_ERRLOG("no memory\n");
		assert(0);
		return -ENOMEM;
	}

	accel_task->s.iovs = iov;
	accel_task->s.iovcnt = iov_cnt;
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->op_code = ACCEL_OPC_CRC32C;

	return module->submit_tasks(module_ch, accel_task);
}

/* Accel framework public API for copy with CRC-32C function */
int
spdk_accel_submit_copy_crc32c(struct spdk_io_channel *ch, void *dst,
			      void *src, uint32_t *crc_dst, uint32_t seed, uint64_t nbytes,
			      int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_COPY_CRC32C];
	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_COPY_CRC32C];

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->dst = dst;
	accel_task->src = src;
	accel_task->crc_dst = crc_dst;
	accel_task->s.iovcnt = 0;
	accel_task->seed = seed;
	accel_task->nbytes = nbytes;
	accel_task->flags = flags;
	accel_task->op_code = ACCEL_OPC_COPY_CRC32C;

	return module->submit_tasks(module_ch, accel_task);
}

/* Accel framework public API for chained copy + CRC-32C function */
int
spdk_accel_submit_copy_crc32cv(struct spdk_io_channel *ch, void *dst,
			       struct iovec *src_iovs, uint32_t iov_cnt, uint32_t *crc_dst,
			       uint32_t seed, int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_COPY_CRC32C];
	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_COPY_CRC32C];
	uint64_t nbytes;
	uint32_t i;

	if (src_iovs == NULL) {
		SPDK_ERRLOG("iov should not be NULL\n");
		return -EINVAL;
	}

	if (!iov_cnt) {
		SPDK_ERRLOG("iovcnt should not be zero\n");
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		SPDK_ERRLOG("no memory\n");
		assert(0);
		return -ENOMEM;
	}

	nbytes = 0;
	for (i = 0; i < iov_cnt; i++) {
		nbytes += src_iovs[i].iov_len;
	}

	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = iov_cnt;
	accel_task->dst = (void *)dst;
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->nbytes = nbytes;
	accel_task->flags = flags;
	accel_task->op_code = ACCEL_OPC_COPY_CRC32C;

	return module->submit_tasks(module_ch, accel_task);
}

int
spdk_accel_submit_compress(struct spdk_io_channel *ch, void *dst, uint64_t nbytes,
			   struct iovec *src_iovs, size_t src_iovcnt, uint32_t *output_size, int flags,
			   spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_COMPRESS];
	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_COMPRESS];
	size_t i, src_len = 0;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	for (i = 0; i < src_iovcnt; i++) {
		src_len += src_iovs[i].iov_len;
	}

	accel_task->nbytes = src_len;
	accel_task->output_size = output_size;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->dst = dst;
	accel_task->nbytes_dst = nbytes;
	accel_task->flags = flags;
	accel_task->op_code = ACCEL_OPC_COMPRESS;

	return module->submit_tasks(module_ch, accel_task);
}

int
spdk_accel_submit_decompress(struct spdk_io_channel *ch, struct iovec *dst_iovs,
			     size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
			     int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_DECOMPRESS];
	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_DECOMPRESS];

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->flags = flags;
	accel_task->op_code = ACCEL_OPC_DECOMPRESS;

	return module->submit_tasks(module_ch, accel_task);
}

static struct spdk_accel_module_if *
_module_find_by_name(const char *name)
{
	struct spdk_accel_module_if *accel_module = NULL;

	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		if (strcmp(name, accel_module->name) == 0) {
			break;
		}
	}

	return accel_module;
}

/* Helper function used when accel modules register with the framework. */
void
spdk_accel_module_list_add(struct spdk_accel_module_if *accel_module)
{
	if (_module_find_by_name(accel_module->name)) {
		SPDK_NOTICELOG("Accel module %s already registered\n", accel_module->name);
		assert(false);
		return;
	}

	/* Make sure that the software module is at the head of the list; this
	 * ensures that all opcodes are first assigned to software and then
	 * updated to HW modules as they are registered.
	 */
	if (strcmp(accel_module->name, "software") == 0) {
		TAILQ_INSERT_HEAD(&spdk_accel_module_list, accel_module, tailq);
	} else {
		TAILQ_INSERT_TAIL(&spdk_accel_module_list, accel_module, tailq);
	}

	if (accel_module->get_ctx_size && accel_module->get_ctx_size() > g_max_accel_module_size) {
		g_max_accel_module_size = accel_module->get_ctx_size();
	}
}
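
/*
 * Illustrative sketch of how a module plugs into the framework. Names
 * prefixed "example_" are hypothetical; the authoritative callback
 * prototypes live in spdk_internal/accel_module.h, and the signatures
 * shown here are only inferred from how this file invokes them. A real
 * module must also provide module_init (called unconditionally by
 * accel_module_initialize() below) and usually registers itself through
 * the constructor-time macro in accel_module.h rather than calling
 * spdk_accel_module_list_add() directly.
 *
 *	static bool
 *	example_supports_opcode(enum accel_opcode opc)
 *	{
 *		return opc == ACCEL_OPC_COPY || opc == ACCEL_OPC_FILL;
 *	}
 *
 *	static struct spdk_accel_module_if g_example_module = {
 *		.name			= "example",
 *		.supports_opcode	= example_supports_opcode,
 *		.get_io_channel		= example_get_io_channel,
 *		.submit_tasks		= example_submit_tasks,
 *		.get_ctx_size		= example_get_ctx_size, // optional; grows the per-task allocation
 *	};
 *
 *	spdk_accel_module_list_add(&g_example_module);
 */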

/* Framework level channel create callback. */
static int
accel_create_channel(void *io_device, void *ctx_buf)
{
	struct accel_io_channel *accel_ch = ctx_buf;
	struct spdk_accel_task *accel_task;
	uint8_t *task_mem;
	int i, j;

	accel_ch->task_pool_base = calloc(MAX_TASKS_PER_CHANNEL, g_max_accel_module_size);
	if (accel_ch->task_pool_base == NULL) {
		return -ENOMEM;
	}

	TAILQ_INIT(&accel_ch->task_pool);
	task_mem = accel_ch->task_pool_base;
	for (i = 0; i < MAX_TASKS_PER_CHANNEL; i++) {
		accel_task = (struct spdk_accel_task *)task_mem;
		TAILQ_INSERT_TAIL(&accel_ch->task_pool, accel_task, link);
		task_mem += g_max_accel_module_size;
	}

	/* Assign modules and get IO channels for each */
	for (i = 0; i < ACCEL_OPC_LAST; i++) {
		accel_ch->module_ch[i] = g_modules_opc[i]->get_io_channel();
		/* This can happen if idxd runs out of channels. */
		if (accel_ch->module_ch[i] == NULL) {
			goto err;
		}
	}

	return 0;
err:
	for (j = 0; j < i; j++) {
		spdk_put_io_channel(accel_ch->module_ch[j]);
	}
	free(accel_ch->task_pool_base);
	return -ENOMEM;
}

/* Framework level channel destroy callback. */
static void
accel_destroy_channel(void *io_device, void *ctx_buf)
{
	struct accel_io_channel *accel_ch = ctx_buf;
	int i;

	for (i = 0; i < ACCEL_OPC_LAST; i++) {
		assert(accel_ch->module_ch[i] != NULL);
		spdk_put_io_channel(accel_ch->module_ch[i]);
		accel_ch->module_ch[i] = NULL;
	}

	free(accel_ch->task_pool_base);
}

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(&spdk_accel_module_list);
}

static void
accel_module_initialize(void)
{
	struct spdk_accel_module_if *accel_module;

	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		accel_module->module_init();
	}
}

int
spdk_accel_initialize(void)
{
	enum accel_opcode op;
	struct spdk_accel_module_if *accel_module = NULL;

	g_modules_started = true;
	accel_module_initialize();

	/* Create our priority global map of opcodes to modules; we populate it
	 * starting with the software module (guaranteed to be first on the list)
	 * and then update opcodes with HW modules that have been initialized.
	 * NOTE: all opcodes must be supported by software in the event that no HW
	 * modules are initialized to support the operation.
	 */
	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		for (op = 0; op < ACCEL_OPC_LAST; op++) {
			if (accel_module->supports_opcode(op)) {
				g_modules_opc[op] = accel_module;
				SPDK_DEBUGLOG(accel, "OPC 0x%x now assigned to %s\n", op, accel_module->name);
			}
		}
	}

	/* Now let's check for overrides and apply all that exist */
	for (op = 0; op < ACCEL_OPC_LAST; op++) {
		if (g_modules_opc_override[op] != NULL) {
			accel_module = _module_find_by_name(g_modules_opc_override[op]);
			if (accel_module == NULL) {
				SPDK_ERRLOG("Invalid module name of %s\n", g_modules_opc_override[op]);
				return -EINVAL;
			}
			if (accel_module->supports_opcode(op) == false) {
				SPDK_ERRLOG("Module %s does not support op code %d\n", accel_module->name, op);
				return -EINVAL;
			}
			g_modules_opc[op] = accel_module;
		}
	}

#ifdef DEBUG
	for (op = 0; op < ACCEL_OPC_LAST; op++) {
		assert(g_modules_opc[op] != NULL);
	}
#endif
	/*
	 * We need a unique identifier for the accel framework, so use the
	 * spdk_accel_module_list address for this purpose.
	 */
	spdk_io_device_register(&spdk_accel_module_list, accel_create_channel, accel_destroy_channel,
				sizeof(struct accel_io_channel), "accel");

	return 0;
}

static void
accel_module_finish_cb(void)
{
	spdk_accel_fini_cb cb_fn = g_fini_cb_fn;

	cb_fn(g_fini_cb_arg);
	g_fini_cb_fn = NULL;
	g_fini_cb_arg = NULL;
}

static void
accel_write_overridden_opc(struct spdk_json_write_ctx *w, const char *opc_str,
			   const char *module_str)
{
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "accel_assign_opc");
	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_string(w, "opname", opc_str);
	spdk_json_write_named_string(w, "module", module_str);
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);
}

void
spdk_accel_write_config_json(struct spdk_json_write_ctx *w)
{
	struct spdk_accel_module_if *accel_module;
	int i;

	/*
	 * The accel framework itself has no config; the modules may, though.
	 */
	spdk_json_write_array_begin(w);
	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		if (accel_module->write_config_json) {
			accel_module->write_config_json(w);
		}
	}
	for (i = 0; i < ACCEL_OPC_LAST; i++) {
		if (g_modules_opc_override[i]) {
			accel_write_overridden_opc(w, g_opcode_strings[i], g_modules_opc_override[i]);
		}
	}
	spdk_json_write_array_end(w);
}
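
/*
 * For reference, each overridden opcode written above serializes to roughly
 * the following entry in the saved configuration (the values shown are
 * examples only):
 *
 *	{
 *		"method": "accel_assign_opc",
 *		"params": {
 *			"opname": "crc32c",
 *			"module": "software"
 *		}
 *	}
 */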

void
spdk_accel_module_finish(void)
{
	if (!g_accel_module) {
		g_accel_module = TAILQ_FIRST(&spdk_accel_module_list);
	} else {
		g_accel_module = TAILQ_NEXT(g_accel_module, tailq);
	}

	if (!g_accel_module) {
		accel_module_finish_cb();
		return;
	}

	if (g_accel_module->module_fini) {
		spdk_thread_send_msg(spdk_get_thread(), g_accel_module->module_fini, NULL);
	} else {
		spdk_accel_module_finish();
	}
}

void
spdk_accel_finish(spdk_accel_fini_cb cb_fn, void *cb_arg)
{
	enum accel_opcode op;

	assert(cb_fn != NULL);

	g_fini_cb_fn = cb_fn;
	g_fini_cb_arg = cb_arg;

	for (op = 0; op < ACCEL_OPC_LAST; op++) {
		if (g_modules_opc_override[op] != NULL) {
			free(g_modules_opc_override[op]);
			g_modules_opc_override[op] = NULL;
		}
		g_modules_opc[op] = NULL;
	}

	spdk_io_device_unregister(&spdk_accel_module_list, NULL);
	spdk_accel_module_finish();
}

SPDK_LOG_REGISTER_COMPONENT(accel)