xref: /spdk/lib/accel/accel.c (revision ea8f5b27612fa03698a9ce3ad4bd37765d9cdfa5)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (c) Intel Corporation.
3  *   All rights reserved.
4  */
5 
6 #include "spdk/stdinc.h"
7 
8 #include "spdk_internal/accel_engine.h"
9 
10 #include "accel_internal.h"
11 
12 #include "spdk/env.h"
13 #include "spdk/likely.h"
14 #include "spdk/log.h"
15 #include "spdk/thread.h"
16 #include "spdk/json.h"
17 #include "spdk/crc32.h"
18 #include "spdk/util.h"
19 
20 /* Accelerator Framework: The following provides a top level
21  * generic API for the accelerator functions defined here. Modules,
22  * such as the one in /module/accel/ioat, supply the implementation
23  * with the exception of the pure software implementation contained
24  * later in this file.
25  */
26 
27 #define ALIGN_4K			0x1000
28 #define MAX_TASKS_PER_CHANNEL		0x800
29 
30 /* Largest context size for all accel modules */
31 static size_t g_max_accel_module_size = sizeof(struct spdk_accel_task);
32 
33 static struct spdk_accel_module_if *g_accel_engine_module = NULL;
34 static spdk_accel_fini_cb g_fini_cb_fn = NULL;
35 static void *g_fini_cb_arg = NULL;
36 static bool g_engine_started = false;
37 
38 /* Global list of registered accelerator modules */
39 static TAILQ_HEAD(, spdk_accel_module_if) spdk_accel_module_list =
40 	TAILQ_HEAD_INITIALIZER(spdk_accel_module_list);
41 
42 /* Global array mapping capabilities to engines */
43 static struct spdk_accel_module_if *g_engines_opc[ACCEL_OPC_LAST] = {};
44 static char *g_engines_opc_override[ACCEL_OPC_LAST] = {};
45 
46 struct accel_io_channel {
47 	struct spdk_io_channel		*engine_ch[ACCEL_OPC_LAST];
48 	void				*task_pool_base;
49 	TAILQ_HEAD(, spdk_accel_task)	task_pool;
50 };
51 
52 int
53 spdk_accel_get_opc_engine_name(enum accel_opcode opcode, const char **engine_name)
54 {
55 	if (opcode >= ACCEL_OPC_LAST) {
56 		/* invalid opcode */
57 		return -EINVAL;
58 	}
59 
60 	if (g_engines_opc[opcode]) {
61 		*engine_name = g_engines_opc[opcode]->name;
62 	} else {
63 		return -ENOENT;
64 	}
65 
66 	return 0;
67 }
68 
69 void
70 _accel_for_each_engine(struct engine_info *info, _accel_for_each_engine_fn fn)
71 {
72 	struct spdk_accel_module_if *accel_engine;
73 	enum accel_opcode opcode;
74 	int j = 0;
75 
76 	TAILQ_FOREACH(accel_engine, &spdk_accel_module_list, tailq) {
77 		for (opcode = 0; opcode < ACCEL_OPC_LAST; opcode++) {
78 			if (accel_engine->supports_opcode(opcode)) {
79 				info->ops[j] = opcode;
80 				j++;
81 			}
82 		}
83 		info->name = accel_engine->name;
84 		info->num_ops = j;
85 		fn(info);
86 		j = 0;
87 	}
88 }
89 
90 int
91 spdk_accel_assign_opc(enum accel_opcode opcode, const char *name)
92 {
93 	if (g_engine_started == true) {
94 		/* we don't allow re-assignment once things have started */
95 		return -EINVAL;
96 	}
97 
98 	if (opcode >= ACCEL_OPC_LAST) {
99 		/* invalid opcode */
100 		return -EINVAL;
101 	}
102 
103 	/* engine selection will be validated after the framework starts. */
104 	g_engines_opc_override[opcode] = strdup(name);
105 
106 	return 0;
107 }
108 
109 void
110 spdk_accel_task_complete(struct spdk_accel_task *accel_task, int status)
111 {
112 	struct accel_io_channel *accel_ch = accel_task->accel_ch;
113 	spdk_accel_completion_cb	cb_fn = accel_task->cb_fn;
114 	void				*cb_arg = accel_task->cb_arg;
115 
116 	/* We should put the accel_task into the list firstly in order to avoid
117 	 * the accel task list is exhausted when there is recursive call to
118 	 * allocate accel_task in user's call back function (cb_fn)
119 	 */
120 	TAILQ_INSERT_HEAD(&accel_ch->task_pool, accel_task, link);
121 
122 	cb_fn(cb_arg, status);
123 }
124 
125 inline static struct spdk_accel_task *
126 _get_task(struct accel_io_channel *accel_ch, spdk_accel_completion_cb cb_fn, void *cb_arg)
127 {
128 	struct spdk_accel_task *accel_task;
129 
130 	accel_task = TAILQ_FIRST(&accel_ch->task_pool);
131 	if (accel_task == NULL) {
132 		return NULL;
133 	}
134 
135 	TAILQ_REMOVE(&accel_ch->task_pool, accel_task, link);
136 	accel_task->link.tqe_next = NULL;
137 	accel_task->link.tqe_prev = NULL;
138 
139 	accel_task->cb_fn = cb_fn;
140 	accel_task->cb_arg = cb_arg;
141 	accel_task->accel_ch = accel_ch;
142 
143 	return accel_task;
144 }
145 
146 
147 
148 /* Accel framework public API for copy function */
149 int
150 spdk_accel_submit_copy(struct spdk_io_channel *ch, void *dst, void *src,
151 		       uint64_t nbytes, int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
152 {
153 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
154 	struct spdk_accel_task *accel_task;
155 	struct spdk_accel_module_if *engine = g_engines_opc[ACCEL_OPC_COPY];
156 	struct spdk_io_channel *engine_ch = accel_ch->engine_ch[ACCEL_OPC_COPY];
157 
158 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
159 	if (accel_task == NULL) {
160 		return -ENOMEM;
161 	}
162 
163 	accel_task->dst = dst;
164 	accel_task->src = src;
165 	accel_task->op_code = ACCEL_OPC_COPY;
166 	accel_task->nbytes = nbytes;
167 	accel_task->flags = flags;
168 
169 	return engine->submit_tasks(engine_ch, accel_task);
170 }
171 
172 /* Accel framework public API for dual cast copy function */
173 int
174 spdk_accel_submit_dualcast(struct spdk_io_channel *ch, void *dst1,
175 			   void *dst2, void *src, uint64_t nbytes, int flags,
176 			   spdk_accel_completion_cb cb_fn, void *cb_arg)
177 {
178 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
179 	struct spdk_accel_task *accel_task;
180 	struct spdk_accel_module_if *engine = g_engines_opc[ACCEL_OPC_DUALCAST];
181 	struct spdk_io_channel *engine_ch = accel_ch->engine_ch[ACCEL_OPC_DUALCAST];
182 
183 	if ((uintptr_t)dst1 & (ALIGN_4K - 1) || (uintptr_t)dst2 & (ALIGN_4K - 1)) {
184 		SPDK_ERRLOG("Dualcast requires 4K alignment on dst addresses\n");
185 		return -EINVAL;
186 	}
187 
188 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
189 	if (accel_task == NULL) {
190 		return -ENOMEM;
191 	}
192 
193 	accel_task->src = src;
194 	accel_task->dst = dst1;
195 	accel_task->dst2 = dst2;
196 	accel_task->nbytes = nbytes;
197 	accel_task->flags = flags;
198 	accel_task->op_code = ACCEL_OPC_DUALCAST;
199 
200 	return engine->submit_tasks(engine_ch, accel_task);
201 }
202 
203 /* Accel framework public API for compare function */
204 int
205 spdk_accel_submit_compare(struct spdk_io_channel *ch, void *src1,
206 			  void *src2, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
207 			  void *cb_arg)
208 {
209 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
210 	struct spdk_accel_task *accel_task;
211 	struct spdk_accel_module_if *engine = g_engines_opc[ACCEL_OPC_COMPARE];
212 	struct spdk_io_channel *engine_ch = accel_ch->engine_ch[ACCEL_OPC_COMPARE];
213 
214 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
215 	if (accel_task == NULL) {
216 		return -ENOMEM;
217 	}
218 
219 	accel_task->src = src1;
220 	accel_task->src2 = src2;
221 	accel_task->nbytes = nbytes;
222 	accel_task->op_code = ACCEL_OPC_COMPARE;
223 
224 	return engine->submit_tasks(engine_ch, accel_task);
225 }
226 
227 /* Accel framework public API for fill function */
228 int
229 spdk_accel_submit_fill(struct spdk_io_channel *ch, void *dst,
230 		       uint8_t fill, uint64_t nbytes, int flags,
231 		       spdk_accel_completion_cb cb_fn, void *cb_arg)
232 {
233 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
234 	struct spdk_accel_task *accel_task;
235 	struct spdk_accel_module_if *engine = g_engines_opc[ACCEL_OPC_FILL];
236 	struct spdk_io_channel *engine_ch = accel_ch->engine_ch[ACCEL_OPC_FILL];
237 
238 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
239 	if (accel_task == NULL) {
240 		return -ENOMEM;
241 	}
242 
243 	accel_task->dst = dst;
244 	memset(&accel_task->fill_pattern, fill, sizeof(uint64_t));
245 	accel_task->nbytes = nbytes;
246 	accel_task->flags = flags;
247 	accel_task->op_code = ACCEL_OPC_FILL;
248 
249 	return engine->submit_tasks(engine_ch, accel_task);
250 }
251 
252 /* Accel framework public API for CRC-32C function */
253 int
254 spdk_accel_submit_crc32c(struct spdk_io_channel *ch, uint32_t *crc_dst,
255 			 void *src, uint32_t seed, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
256 			 void *cb_arg)
257 {
258 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
259 	struct spdk_accel_task *accel_task;
260 	struct spdk_accel_module_if *engine = g_engines_opc[ACCEL_OPC_CRC32C];
261 	struct spdk_io_channel *engine_ch = accel_ch->engine_ch[ACCEL_OPC_CRC32C];
262 
263 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
264 	if (accel_task == NULL) {
265 		return -ENOMEM;
266 	}
267 
268 	accel_task->crc_dst = crc_dst;
269 	accel_task->src = src;
270 	accel_task->v.iovcnt = 0;
271 	accel_task->seed = seed;
272 	accel_task->nbytes = nbytes;
273 	accel_task->op_code = ACCEL_OPC_CRC32C;
274 
275 	return engine->submit_tasks(engine_ch, accel_task);
276 }
277 
278 /* Accel framework public API for chained CRC-32C function */
279 int
280 spdk_accel_submit_crc32cv(struct spdk_io_channel *ch, uint32_t *crc_dst,
281 			  struct iovec *iov, uint32_t iov_cnt, uint32_t seed,
282 			  spdk_accel_completion_cb cb_fn, void *cb_arg)
283 {
284 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
285 	struct spdk_accel_task *accel_task;
286 	struct spdk_accel_module_if *engine = g_engines_opc[ACCEL_OPC_CRC32C];
287 	struct spdk_io_channel *engine_ch = accel_ch->engine_ch[ACCEL_OPC_CRC32C];
288 
289 	if (iov == NULL) {
290 		SPDK_ERRLOG("iov should not be NULL");
291 		return -EINVAL;
292 	}
293 
294 	if (!iov_cnt) {
295 		SPDK_ERRLOG("iovcnt should not be zero value\n");
296 		return -EINVAL;
297 	}
298 
299 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
300 	if (accel_task == NULL) {
301 		SPDK_ERRLOG("no memory\n");
302 		assert(0);
303 		return -ENOMEM;
304 	}
305 
306 	accel_task->v.iovs = iov;
307 	accel_task->v.iovcnt = iov_cnt;
308 	accel_task->crc_dst = crc_dst;
309 	accel_task->seed = seed;
310 	accel_task->op_code = ACCEL_OPC_CRC32C;
311 
312 	return engine->submit_tasks(engine_ch, accel_task);
313 }
314 
315 /* Accel framework public API for copy with CRC-32C function */
316 int
317 spdk_accel_submit_copy_crc32c(struct spdk_io_channel *ch, void *dst,
318 			      void *src, uint32_t *crc_dst, uint32_t seed, uint64_t nbytes,
319 			      int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
320 {
321 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
322 	struct spdk_accel_task *accel_task;
323 	struct spdk_accel_module_if *engine = g_engines_opc[ACCEL_OPC_COPY_CRC32C];
324 	struct spdk_io_channel *engine_ch = accel_ch->engine_ch[ACCEL_OPC_COPY_CRC32C];
325 
326 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
327 	if (accel_task == NULL) {
328 		return -ENOMEM;
329 	}
330 
331 	accel_task->dst = dst;
332 	accel_task->src = src;
333 	accel_task->crc_dst = crc_dst;
334 	accel_task->v.iovcnt = 0;
335 	accel_task->seed = seed;
336 	accel_task->nbytes = nbytes;
337 	accel_task->flags = flags;
338 	accel_task->op_code = ACCEL_OPC_COPY_CRC32C;
339 
340 	return engine->submit_tasks(engine_ch, accel_task);
341 }
342 
343 /* Accel framework public API for chained copy + CRC-32C function */
344 int
345 spdk_accel_submit_copy_crc32cv(struct spdk_io_channel *ch, void *dst,
346 			       struct iovec *src_iovs, uint32_t iov_cnt, uint32_t *crc_dst,
347 			       uint32_t seed, int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
348 {
349 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
350 	struct spdk_accel_task *accel_task;
351 	struct spdk_accel_module_if *engine = g_engines_opc[ACCEL_OPC_COPY_CRC32C];
352 	struct spdk_io_channel *engine_ch = accel_ch->engine_ch[ACCEL_OPC_COPY_CRC32C];
353 	uint64_t nbytes;
354 	uint32_t i;
355 
356 	if (src_iovs == NULL) {
357 		SPDK_ERRLOG("iov should not be NULL");
358 		return -EINVAL;
359 	}
360 
361 	if (!iov_cnt) {
362 		SPDK_ERRLOG("iovcnt should not be zero value\n");
363 		return -EINVAL;
364 	}
365 
366 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
367 	if (accel_task == NULL) {
368 		SPDK_ERRLOG("no memory\n");
369 		assert(0);
370 		return -ENOMEM;
371 	}
372 
373 	nbytes = 0;
374 	for (i = 0; i < iov_cnt; i++) {
375 		nbytes += src_iovs[i].iov_len;
376 	}
377 
378 	accel_task->v.iovs = src_iovs;
379 	accel_task->v.iovcnt = iov_cnt;
380 	accel_task->dst = (void *)dst;
381 	accel_task->crc_dst = crc_dst;
382 	accel_task->seed = seed;
383 	accel_task->nbytes = nbytes;
384 	accel_task->flags = flags;
385 	accel_task->op_code = ACCEL_OPC_COPY_CRC32C;
386 
387 	return engine->submit_tasks(engine_ch, accel_task);
388 }
389 
390 int
391 spdk_accel_submit_compress(struct spdk_io_channel *ch, void *dst, void *src, uint64_t nbytes_dst,
392 			   uint64_t nbytes_src, uint32_t *output_size, int flags,
393 			   spdk_accel_completion_cb cb_fn, void *cb_arg)
394 {
395 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
396 	struct spdk_accel_task *accel_task;
397 	struct spdk_accel_module_if *engine = g_engines_opc[ACCEL_OPC_COMPRESS];
398 	struct spdk_io_channel *engine_ch = accel_ch->engine_ch[ACCEL_OPC_COMPRESS];
399 
400 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
401 	if (accel_task == NULL) {
402 		return -ENOMEM;
403 	}
404 
405 	accel_task->output_size = output_size;
406 	accel_task->src = src;
407 	accel_task->dst = dst;
408 	accel_task->nbytes = nbytes_src;
409 	accel_task->nbytes_dst = nbytes_dst;
410 	accel_task->flags = flags;
411 	accel_task->op_code = ACCEL_OPC_COMPRESS;
412 
413 	return engine->submit_tasks(engine_ch, accel_task);
414 
415 	return 0;
416 }
417 
418 int
419 spdk_accel_submit_decompress(struct spdk_io_channel *ch, void *dst, void *src, uint64_t nbytes_dst,
420 			     uint64_t nbytes_src, int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
421 {
422 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
423 	struct spdk_accel_task *accel_task;
424 	struct spdk_accel_module_if *engine = g_engines_opc[ACCEL_OPC_DECOMPRESS];
425 	struct spdk_io_channel *engine_ch = accel_ch->engine_ch[ACCEL_OPC_DECOMPRESS];
426 
427 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
428 	if (accel_task == NULL) {
429 		return -ENOMEM;
430 	}
431 
432 	accel_task->src = src;
433 	accel_task->dst = dst;
434 	accel_task->nbytes = nbytes_src;
435 	accel_task->nbytes_dst = nbytes_dst;
436 	accel_task->flags = flags;
437 	accel_task->op_code = ACCEL_OPC_DECOMPRESS;
438 
439 	return engine->submit_tasks(engine_ch, accel_task);
440 
441 	return 0;
442 }
443 
444 
445 static struct spdk_accel_module_if *
446 _module_find_by_name(const char *name)
447 {
448 	struct spdk_accel_module_if *accel_module = NULL;
449 
450 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
451 		if (strcmp(name, accel_module->name) == 0) {
452 			break;
453 		}
454 	}
455 
456 	return accel_module;
457 }
458 
459 /* Helper function used when accel modules register with the framework. */
460 void
461 spdk_accel_module_list_add(struct spdk_accel_module_if *accel_module)
462 {
463 	if (_module_find_by_name(accel_module->name)) {
464 		SPDK_NOTICELOG("Accel module %s already registered\n", accel_module->name);
465 		assert(false);
466 		return;
467 	}
468 
469 	/* Make sure that the software module is at the head of the list, this
470 	 * will assure that all opcodes are later assigned to software first and
471 	 * then udpated to HW engines as they are registered.
472 	 */
473 	if (strcmp(accel_module->name, "software") == 0) {
474 		TAILQ_INSERT_HEAD(&spdk_accel_module_list, accel_module, tailq);
475 	} else {
476 		TAILQ_INSERT_TAIL(&spdk_accel_module_list, accel_module, tailq);
477 	}
478 
479 	if (accel_module->get_ctx_size && accel_module->get_ctx_size() > g_max_accel_module_size) {
480 		g_max_accel_module_size = accel_module->get_ctx_size();
481 	}
482 }
483 
484 /* Framework level channel create callback. */
485 static int
486 accel_create_channel(void *io_device, void *ctx_buf)
487 {
488 	struct accel_io_channel	*accel_ch = ctx_buf;
489 	struct spdk_accel_task *accel_task;
490 	uint8_t *task_mem;
491 	int i, j;
492 
493 	accel_ch->task_pool_base = calloc(MAX_TASKS_PER_CHANNEL, g_max_accel_module_size);
494 	if (accel_ch->task_pool_base == NULL) {
495 		return -ENOMEM;
496 	}
497 
498 	TAILQ_INIT(&accel_ch->task_pool);
499 	task_mem = accel_ch->task_pool_base;
500 	for (i = 0 ; i < MAX_TASKS_PER_CHANNEL; i++) {
501 		accel_task = (struct spdk_accel_task *)task_mem;
502 		TAILQ_INSERT_TAIL(&accel_ch->task_pool, accel_task, link);
503 		task_mem += g_max_accel_module_size;
504 	}
505 
506 	/* Assign engines and get IO channels for each */
507 	for (i = 0; i < ACCEL_OPC_LAST; i++) {
508 		accel_ch->engine_ch[i] = g_engines_opc[i]->get_io_channel();
509 		/* This can happen if idxd runs out of channels. */
510 		if (accel_ch->engine_ch[i] == NULL) {
511 			goto err;
512 		}
513 	}
514 
515 	return 0;
516 err:
517 	for (j = 0; j < i; j++) {
518 		spdk_put_io_channel(accel_ch->engine_ch[j]);
519 	}
520 	free(accel_ch->task_pool_base);
521 	return -ENOMEM;
522 }
523 
524 /* Framework level channel destroy callback. */
525 static void
526 accel_destroy_channel(void *io_device, void *ctx_buf)
527 {
528 	struct accel_io_channel	*accel_ch = ctx_buf;
529 	int i;
530 
531 	for (i = 0; i < ACCEL_OPC_LAST; i++) {
532 		assert(accel_ch->engine_ch[i] != NULL);
533 		spdk_put_io_channel(accel_ch->engine_ch[i]);
534 		accel_ch->engine_ch[i] = NULL;
535 	}
536 
537 	free(accel_ch->task_pool_base);
538 }
539 
struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	/* The address of spdk_accel_module_list doubles as the accel
	 * framework's io_device identifier (see spdk_io_device_register in
	 * spdk_accel_initialize), so look up the per-thread channel by it.
	 */
	return spdk_get_io_channel(&spdk_accel_module_list);
}
545 
546 static void
547 accel_module_initialize(void)
548 {
549 	struct spdk_accel_module_if *accel_engine_module;
550 
551 	TAILQ_FOREACH(accel_engine_module, &spdk_accel_module_list, tailq) {
552 		accel_engine_module->module_init();
553 	}
554 }
555 
int
spdk_accel_initialize(void)
{
	enum accel_opcode op;
	struct spdk_accel_module_if *accel_module = NULL;

	/* Lock out further opcode overrides (see spdk_accel_assign_opc),
	 * then let each registered module initialize itself.
	 */
	g_engine_started = true;
	accel_module_initialize();

	/* Create our priority global map of opcodes to engines, we populate starting
	 * with the software engine (guaranteed to be first on the list) and then
	 * updating opcodes with HW engines that have been initialized.
	 * NOTE: all opcodes must be supported by software in the event that no HW
	 * engines are initialized to support the operation.
	 */
	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		for (op = 0; op < ACCEL_OPC_LAST; op++) {
			if (accel_module->supports_opcode(op)) {
				g_engines_opc[op] = accel_module;
				SPDK_DEBUGLOG(accel, "OPC 0x%x now assigned to %s\n", op, accel_module->name);
			}
		}
	}

	/* Apply any user-requested per-opcode engine overrides; each must
	 * name a registered module that actually supports the opcode.
	 */
	for (op = 0; op < ACCEL_OPC_LAST; op++) {
		if (g_engines_opc_override[op] != NULL) {
			accel_module = _module_find_by_name(g_engines_opc_override[op]);
			if (accel_module == NULL) {
				SPDK_ERRLOG("Invalid module name of %s\n", g_engines_opc_override[op]);
				return -EINVAL;
			}
			if (accel_module->supports_opcode(op) == false) {
				SPDK_ERRLOG("Engine %s does not support op code %d\n", accel_module->name, op);
				return -EINVAL;
			}
			g_engines_opc[op] = accel_module;
		}
	}

#ifdef DEBUG
	/* Sanity check: every opcode must have an engine assigned by now. */
	for (op = 0; op < ACCEL_OPC_LAST; op++) {
		assert(g_engines_opc[op] != NULL);
	}
#endif
	/*
	 * We need a unique identifier for the accel framework, so use the
	 * spdk_accel_module_list address for this purpose.
	 */
	spdk_io_device_register(&spdk_accel_module_list, accel_create_channel, accel_destroy_channel,
				sizeof(struct accel_io_channel), "accel");

	return 0;
}
610 
611 static void
612 accel_module_finish_cb(void)
613 {
614 	spdk_accel_fini_cb cb_fn = g_fini_cb_fn;
615 
616 	cb_fn(g_fini_cb_arg);
617 	g_fini_cb_fn = NULL;
618 	g_fini_cb_arg = NULL;
619 }
620 
621 void
622 spdk_accel_write_config_json(struct spdk_json_write_ctx *w)
623 {
624 	struct spdk_accel_module_if *accel_engine_module;
625 
626 	/*
627 	 * The accel fw has no config, there may be some in
628 	 * the engines/modules though.
629 	 */
630 	spdk_json_write_array_begin(w);
631 	TAILQ_FOREACH(accel_engine_module, &spdk_accel_module_list, tailq) {
632 		if (accel_engine_module->write_config_json) {
633 			accel_engine_module->write_config_json(w);
634 		}
635 	}
636 	spdk_json_write_array_end(w);
637 }
638 
void
spdk_accel_module_finish(void)
{
	/* Advance the finish cursor one module per invocation.
	 * NOTE(review): this appears to rely on each module's module_fini
	 * eventually re-entering this function to continue the walk —
	 * confirm against the module interface contract.
	 */
	if (!g_accel_engine_module) {
		g_accel_engine_module = TAILQ_FIRST(&spdk_accel_module_list);
	} else {
		g_accel_engine_module = TAILQ_NEXT(g_accel_engine_module, tailq);
	}

	if (!g_accel_engine_module) {
		/* Walked past the last module: run the user's fini callback. */
		accel_module_finish_cb();
		return;
	}

	if (g_accel_engine_module->module_fini) {
		/* Run the module's fini from a thread message context. */
		spdk_thread_send_msg(spdk_get_thread(), g_accel_engine_module->module_fini, NULL);
	} else {
		/* No fini hook for this module; advance immediately. */
		spdk_accel_module_finish();
	}
}
659 
660 void
661 spdk_accel_finish(spdk_accel_fini_cb cb_fn, void *cb_arg)
662 {
663 	enum accel_opcode op;
664 
665 	assert(cb_fn != NULL);
666 
667 	g_fini_cb_fn = cb_fn;
668 	g_fini_cb_arg = cb_arg;
669 
670 	for (op = 0; op < ACCEL_OPC_LAST; op++) {
671 		if (g_engines_opc_override[op] != NULL) {
672 			free(g_engines_opc_override[op]);
673 			g_engines_opc_override[op] = NULL;
674 		}
675 		g_engines_opc[op] = NULL;
676 	}
677 
678 	spdk_io_device_unregister(&spdk_accel_module_list, NULL);
679 	spdk_accel_module_finish();
680 }
681 
682 SPDK_LOG_REGISTER_COMPONENT(accel)
683