/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2022 Intel Corporation.
 *   Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
 *   All rights reserved.
 */

#include "accel_dsa.h"

#include "spdk/stdinc.h"

#include "spdk/accel_module.h"
#include "spdk/log.h"
#include "spdk_internal/idxd.h"

#include "spdk/env.h"
#include "spdk/event.h"
#include "spdk/likely.h"
#include "spdk/thread.h"
#include "spdk/idxd.h"
#include "spdk/util.h"
#include "spdk/json.h"
#include "spdk/trace.h"
#include "spdk_internal/trace_defs.h"

#define ACCEL_DSA_MD_IOBUF_SMALL_CACHE_SIZE			128
#define ACCEL_DSA_MD_IOBUF_LARGE_CACHE_SIZE			32

static bool g_dsa_enable = false;
static bool g_kernel_mode = false;

enum channel_state {
	IDXD_CHANNEL_ACTIVE,
	IDXD_CHANNEL_ERROR,
};

static bool g_dsa_initialized = false;

struct idxd_device {
	struct spdk_idxd_device		*dsa;
	TAILQ_ENTRY(idxd_device)	tailq;
};
static TAILQ_HEAD(, idxd_device) g_dsa_devices = TAILQ_HEAD_INITIALIZER(g_dsa_devices);
static struct idxd_device *g_next_dev = NULL;
static uint32_t g_num_devices = 0;
static pthread_mutex_t g_dev_lock = PTHREAD_MUTEX_INITIALIZER;

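/* Per-task context embedding the generic accel task. md_iov and iobuf are
 * only used by operations that need a temporary metadata buffer (currently
 * DIX Verify).
 */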
struct idxd_task {
	struct spdk_accel_task	task;
	struct idxd_io_channel	*chan;
	struct iovec		md_iov;
	struct spdk_iobuf_entry	iobuf;
};

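/* Per-thread channel state: the idxd channel, the device it came from, a
 * poller driving completions, and a queue for tasks that could not be
 * submitted right away.
 */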
struct idxd_io_channel {
	struct spdk_idxd_io_channel	*chan;
	struct idxd_device		*dev;
	enum channel_state		state;
	struct spdk_poller		*poller;
	uint32_t			num_outstanding;
	STAILQ_HEAD(, spdk_accel_task)	queued_tasks;
	struct spdk_iobuf_channel	iobuf;
};

static struct spdk_io_channel *dsa_get_io_channel(void);

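/* Round-robin across the probed devices, skipping those on a remote NUMA
 * node, until one hands out an idxd channel. Returns NULL if every device
 * local to this thread's socket is already at its channel limit.
 */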
static struct idxd_device *
idxd_select_device(struct idxd_io_channel *chan)
{
	uint32_t count = 0;
	struct idxd_device *dev;
	uint32_t numa_id = spdk_env_get_numa_id(spdk_env_get_current_core());

	/*
	 * We allow channels to share underlying devices;
	 * selection is round-robin based, with a limit
	 * on how many channels can share one device.
	 */
	do {
		/* select next device */
		pthread_mutex_lock(&g_dev_lock);
		g_next_dev = TAILQ_NEXT(g_next_dev, tailq);
		if (g_next_dev == NULL) {
			g_next_dev = TAILQ_FIRST(&g_dsa_devices);
		}
		dev = g_next_dev;
		pthread_mutex_unlock(&g_dev_lock);

		if (numa_id != spdk_idxd_get_socket(dev->dsa)) {
			continue;
		}

		/*
		 * Now see if a channel is available on this one. We only
		 * allow a specific number of channels to share a device
		 * to limit outstanding IO for flow control purposes.
		 */
		chan->chan = spdk_idxd_get_channel(dev->dsa);
		if (chan->chan != NULL) {
			SPDK_DEBUGLOG(accel_dsa, "On socket %d using device on numa %d\n",
				      numa_id, spdk_idxd_get_socket(dev->dsa));
			return dev;
		}
	} while (++count < g_num_devices);

	/* We are out of available channels and/or devices for the local socket. The number
	 * of channels allocated per device is fixed, and only devices on the same socket
	 * as the current thread are used. On a 2-socket system it may be possible to avoid
	 * this situation by spreading threads across both sockets.
	 */
	SPDK_ERRLOG("No more DSA devices available on the local socket.\n");
	return NULL;
}

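/* Completion callback passed to every idxd submission; translates the
 * device status into an accel task completion.
 */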
static void
dsa_done(void *cb_arg, int status)
{
	struct idxd_task *idxd_task = cb_arg;
	struct idxd_io_channel *chan;
	int rc;

	chan = idxd_task->chan;

	/* If the DSA DIF Check operation detects an error, detailed info about
	 * this error (like actual/expected values) needs to be obtained by
	 * calling the software DIF Verify operation.
	 */
	if (spdk_unlikely(status == -EIO)) {
		if (idxd_task->task.op_code == SPDK_ACCEL_OPC_DIF_VERIFY ||
		    idxd_task->task.op_code == SPDK_ACCEL_OPC_DIF_VERIFY_COPY) {
			rc = spdk_dif_verify(idxd_task->task.s.iovs, idxd_task->task.s.iovcnt,
					     idxd_task->task.dif.num_blocks,
					     idxd_task->task.dif.ctx, idxd_task->task.dif.err);
			if (rc != 0) {
				SPDK_ERRLOG("DIF error detected. type=%d, offset=%" PRIu32 "\n",
					    idxd_task->task.dif.err->err_type,
					    idxd_task->task.dif.err->err_offset);
			}
		}
	}

	assert(chan->num_outstanding > 0);
	spdk_trace_record(TRACE_ACCEL_DSA_OP_COMPLETE, 0, 0, 0, chan->num_outstanding - 1);
	chan->num_outstanding--;

	spdk_accel_task_complete(&idxd_task->task, status);
}

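/* DSA dualcast writes one source buffer to two destinations. The operation
 * only supports a single iovec per buffer, and all three buffers must have
 * the same length.
 */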
static int
idxd_submit_dualcast(struct idxd_io_channel *ch, struct idxd_task *idxd_task, int flags)
{
	struct spdk_accel_task *task = &idxd_task->task;

	if (spdk_unlikely(task->d.iovcnt != 1 || task->d2.iovcnt != 1 || task->s.iovcnt != 1)) {
		return -EINVAL;
	}

	if (spdk_unlikely(task->d.iovs[0].iov_len != task->s.iovs[0].iov_len ||
			  task->d.iovs[0].iov_len != task->d2.iovs[0].iov_len)) {
		return -EINVAL;
	}

	return spdk_idxd_submit_dualcast(ch->chan, task->d.iovs[0].iov_base,
					 task->d2.iovs[0].iov_base, task->s.iovs[0].iov_base,
					 task->d.iovs[0].iov_len, flags, dsa_done, idxd_task);
}

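/* Detect the source/destination layout that would trip DSA's conservative
 * overlap check for DIF strip operations (see the comment in
 * _process_single_task). Returns -EFAULT when the software fallback must be
 * used instead.
 */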
static int
check_dsa_dif_strip_overlap_bufs(struct spdk_accel_task *task)
{
	uint64_t src_seg_addr_end_ext;
	uint64_t dst_seg_addr_end_ext;
	size_t i;

	/* The number of source and destination iovecs must be the same.
	 * If so, one of them can be used to iterate over both vectors
	 * later in the loop. */
	if (task->d.iovcnt != task->s.iovcnt) {
		SPDK_ERRLOG("Mismatched iovcnts: src=%d, dst=%d\n",
			    task->s.iovcnt, task->d.iovcnt);
		return -EINVAL;
	}

	for (i = 0; i < task->s.iovcnt; i++) {
		src_seg_addr_end_ext = (uint64_t)task->s.iovs[i].iov_base +
				       task->s.iovs[i].iov_len;

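		/* Extending the destination end by the source segment length is
		 * intentional: DSA applies the (larger) source transfer size to
		 * both buffers when checking for overlap, so the same is done here.
		 */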
		dst_seg_addr_end_ext = (uint64_t)task->d.iovs[i].iov_base +
				       task->s.iovs[i].iov_len;

		if ((dst_seg_addr_end_ext >= (uint64_t)task->s.iovs[i].iov_base) &&
		    (dst_seg_addr_end_ext <= src_seg_addr_end_ext)) {
			return -EFAULT;
		}
	}

	return 0;
}

static void
spdk_accel_sw_task_complete(void *ctx)
{
	struct spdk_accel_task *task = (struct spdk_accel_task *)ctx;

	spdk_accel_task_complete(task, task->status);
}

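/* Second half of the emulated DIX Verify: compare the metadata DSA just
 * generated with the caller's expected metadata. On mismatch, run the
 * software spdk_dix_verify() to obtain detailed error information.
 */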
static void
_accel_dsa_dix_verify_generate_cb(void *cb_arg, int status)
{
	struct idxd_task *idxd_task = cb_arg;
	struct iovec *original_mdiov = idxd_task->task.d.iovs;
	size_t mdiov_len = idxd_task->md_iov.iov_len;
	int rc;

	if (status != 0) {
		SPDK_ERRLOG("Unable to complete DIX Verify (DIX Generate failed)\n");
		goto end;
	}

	rc = memcmp(original_mdiov->iov_base, idxd_task->md_iov.iov_base, mdiov_len);
	if (rc != 0) {
		SPDK_ERRLOG("DIX Verify failed\n");
		status = -EINVAL;
		rc = spdk_dix_verify(idxd_task->task.s.iovs, idxd_task->task.s.iovcnt,
				     original_mdiov, idxd_task->task.dif.num_blocks,
				     idxd_task->task.dif.ctx, idxd_task->task.dif.err);
		if (rc != 0) {
			SPDK_ERRLOG("DIX error detected. type=%d, offset=%" PRIu32 "\n",
				    idxd_task->task.dif.err->err_type,
				    idxd_task->task.dif.err->err_offset);
		}
	}

end:
	spdk_iobuf_put(&idxd_task->chan->iobuf, idxd_task->md_iov.iov_base, mdiov_len);
	dsa_done(idxd_task, status);
}

static void
_accel_dsa_dix_verify(struct idxd_task *idxd_task)
{
	int rc;

	/* Since Intel DSA doesn't provide a separate DIX Verify operation, it is done
	 * in two steps: DIX Generate into a new buffer, followed by a memory compare.
	 */
	rc = spdk_idxd_submit_dix_generate(idxd_task->chan->chan, idxd_task->task.s.iovs,
					   idxd_task->task.s.iovcnt, &idxd_task->md_iov, idxd_task->task.dif.num_blocks,
					   idxd_task->task.dif.ctx, 0, _accel_dsa_dix_verify_generate_cb, idxd_task);
	if (rc != 0) {
		SPDK_ERRLOG("Unable to complete DIX Verify (DIX Generate failed)\n");
		spdk_iobuf_put(&idxd_task->chan->iobuf, idxd_task->md_iov.iov_base,
			       idxd_task->md_iov.iov_len);
		dsa_done(idxd_task, rc);
	}
}

static void
accel_dsa_dix_verify_get_iobuf_cb(struct spdk_iobuf_entry *iobuf, void *buf)
{
	struct idxd_task *idxd_task;

	idxd_task = SPDK_CONTAINEROF(iobuf, struct idxd_task, iobuf);
	idxd_task->md_iov.iov_base = buf;
	_accel_dsa_dix_verify(idxd_task);
}

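/* spdk_iobuf_get() may satisfy the request asynchronously; in that case it
 * returns NULL here and accel_dsa_dix_verify_get_iobuf_cb() continues the
 * operation once a buffer becomes available.
 */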
static int
accel_dsa_dix_verify(struct idxd_io_channel *chan, int flags,
		     struct idxd_task *idxd_task)
{
	idxd_task->md_iov.iov_len = idxd_task->task.d.iovs[0].iov_len;
	idxd_task->md_iov.iov_base = spdk_iobuf_get(&chan->iobuf, idxd_task->md_iov.iov_len,
				     &idxd_task->iobuf, accel_dsa_dix_verify_get_iobuf_cb);

	if (idxd_task->md_iov.iov_base != NULL) {
		_accel_dsa_dix_verify(idxd_task);
	}

	return 0;
}

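/* Map a single accel task onto the corresponding idxd submission. Returns
 * -EBUSY when the device queue is full so the caller can queue the task and
 * retry.
 */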
static int
_process_single_task(struct spdk_io_channel *ch, struct spdk_accel_task *task)
{
	struct idxd_io_channel *chan = spdk_io_channel_get_ctx(ch);
	struct idxd_task *idxd_task;
	int rc = 0, flags = 0;

	idxd_task = SPDK_CONTAINEROF(task, struct idxd_task, task);
	idxd_task->chan = chan;

	switch (task->op_code) {
	case SPDK_ACCEL_OPC_COPY:
		rc = spdk_idxd_submit_copy(chan->chan, task->d.iovs, task->d.iovcnt,
					   task->s.iovs, task->s.iovcnt, flags, dsa_done, idxd_task);
		break;
	case SPDK_ACCEL_OPC_DUALCAST:
		rc = idxd_submit_dualcast(chan, idxd_task, flags);
		break;
	case SPDK_ACCEL_OPC_COMPARE:
		rc = spdk_idxd_submit_compare(chan->chan, task->s.iovs, task->s.iovcnt,
					      task->s2.iovs, task->s2.iovcnt, flags,
					      dsa_done, idxd_task);
		break;
	case SPDK_ACCEL_OPC_FILL:
		rc = spdk_idxd_submit_fill(chan->chan, task->d.iovs, task->d.iovcnt,
					   task->fill_pattern, flags, dsa_done, idxd_task);
		break;
	case SPDK_ACCEL_OPC_CRC32C:
		rc = spdk_idxd_submit_crc32c(chan->chan, task->s.iovs, task->s.iovcnt, task->seed,
					     task->crc_dst, flags, dsa_done, idxd_task);
		break;
	case SPDK_ACCEL_OPC_COPY_CRC32C:
		rc = spdk_idxd_submit_copy_crc32c(chan->chan, task->d.iovs, task->d.iovcnt,
						  task->s.iovs, task->s.iovcnt,
						  task->seed, task->crc_dst, flags,
						  dsa_done, idxd_task);
		break;
	case SPDK_ACCEL_OPC_DIF_VERIFY:
		rc = spdk_idxd_submit_dif_check(chan->chan,
						task->s.iovs, task->s.iovcnt,
						task->dif.num_blocks, task->dif.ctx, flags,
						dsa_done, idxd_task);
		break;
	case SPDK_ACCEL_OPC_DIF_GENERATE_COPY:
		rc = spdk_idxd_submit_dif_insert(chan->chan,
						 task->d.iovs, task->d.iovcnt,
						 task->s.iovs, task->s.iovcnt,
						 task->dif.num_blocks, task->dif.ctx, flags,
						 dsa_done, idxd_task);
		break;
	case SPDK_ACCEL_OPC_DIF_VERIFY_COPY:
		/* For DIF strip operations, DSA may incorrectly report an overlapping buffer
		 * error if the destination buffer immediately precedes the source buffer.
		 * This is because DSA uses the transfer size in the descriptor for both
		 * the source and destination buffers when checking for buffer overlap.
		 * Since the transfer size applies to the source buffer, which is larger
		 * than the destination buffer by the metadata size, it overstates the
		 * destination buffer size. To avoid such spurious errors from DSA, the
		 * software checks whether this condition can occur and, if so, performs
		 * the software fallback instead. */
		rc = check_dsa_dif_strip_overlap_bufs(task);
		if (rc == 0) {
			rc = spdk_idxd_submit_dif_strip(chan->chan,
							task->d.iovs, task->d.iovcnt,
							task->s.iovs, task->s.iovcnt,
							task->dif.num_blocks, task->dif.ctx, flags,
							dsa_done, idxd_task);
		} else if (rc == -EFAULT) {
			rc = spdk_dif_verify_copy(task->d.iovs,
						  task->d.iovcnt,
						  task->s.iovs,
						  task->s.iovcnt,
						  task->dif.num_blocks,
						  task->dif.ctx,
						  task->dif.err);
			idxd_task->task.status = rc;
			spdk_thread_send_msg(spdk_get_thread(), spdk_accel_sw_task_complete, (void *)&idxd_task->task);
			rc = 0;
		}
		break;
	case SPDK_ACCEL_OPC_DIX_GENERATE:
		rc = spdk_idxd_submit_dix_generate(chan->chan, task->s.iovs, task->s.iovcnt,
						   task->d.iovs, task->dif.num_blocks,
						   task->dif.ctx, flags, dsa_done, idxd_task);
		break;
	case SPDK_ACCEL_OPC_DIX_VERIFY:
		rc = accel_dsa_dix_verify(chan, flags, idxd_task);
		break;
	default:
		assert(false);
		rc = -EINVAL;
		break;
	}

	if (rc == 0) {
		chan->num_outstanding++;
		spdk_trace_record(TRACE_ACCEL_DSA_OP_SUBMIT, 0, 0, 0, chan->num_outstanding);
	}

	return rc;
}

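/* Entry point used by the accel framework. Tasks are kept in order: if
 * anything is already queued, or the device reports -EBUSY, the task is
 * appended to queued_tasks and retried later from the poller.
 */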
static int
dsa_submit_task(struct spdk_io_channel *ch, struct spdk_accel_task *task)
{
	struct idxd_io_channel *chan = spdk_io_channel_get_ctx(ch);
	int rc = 0;

	assert(STAILQ_NEXT(task, link) == NULL);

	if (spdk_unlikely(chan->state == IDXD_CHANNEL_ERROR)) {
		spdk_accel_task_complete(task, -EINVAL);
		return 0;
	}

	if (!STAILQ_EMPTY(&chan->queued_tasks)) {
		STAILQ_INSERT_TAIL(&chan->queued_tasks, task, link);
		return 0;
	}

	rc = _process_single_task(ch, task);
	if (rc == -EBUSY) {
		STAILQ_INSERT_TAIL(&chan->queued_tasks, task, link);
	} else if (rc) {
		spdk_accel_task_complete(task, rc);
	}

	return 0;
}

static int
dsa_submit_queued_tasks(struct idxd_io_channel *chan)
{
	struct spdk_accel_task *task, *tmp;
	struct spdk_io_channel *ch = spdk_io_channel_from_ctx(chan);
	int rc = 0;

	if (spdk_unlikely(chan->state == IDXD_CHANNEL_ERROR)) {
		/* Complete queued tasks with error and clear the list */
		while ((task = STAILQ_FIRST(&chan->queued_tasks))) {
			STAILQ_REMOVE_HEAD(&chan->queued_tasks, link);
			spdk_accel_task_complete(task, -EINVAL);
		}
		return 0;
	}

	STAILQ_FOREACH_SAFE(task, &chan->queued_tasks, link, tmp) {
		rc = _process_single_task(ch, task);
		if (rc == -EBUSY) {
			return rc;
		}
		STAILQ_REMOVE_HEAD(&chan->queued_tasks, link);
		if (rc) {
			spdk_accel_task_complete(task, rc);
		}
	}

	return 0;
}

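/* Channel poller: reap completions and retry tasks that were queued while
 * the device was busy.
 */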
static int
idxd_poll(void *arg)
{
	struct idxd_io_channel *chan = arg;
	int count;

	count = spdk_idxd_process_events(chan->chan);

	/* Retry any tasks that were queued while the device was busy; if the
	 * channel has entered the error state, dsa_submit_queued_tasks() will
	 * drain them with an error instead. */
	if (!STAILQ_EMPTY(&chan->queued_tasks)) {
		dsa_submit_queued_tasks(chan);
	}

	return count > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
}

static size_t
accel_dsa_get_ctx_size(void)
{
	return sizeof(struct idxd_task);
}

static bool
dsa_supports_opcode(enum spdk_accel_opcode opc)
{
	if (!g_dsa_initialized) {
		assert(0);
		return false;
	}

	switch (opc) {
	case SPDK_ACCEL_OPC_COPY:
	case SPDK_ACCEL_OPC_FILL:
	case SPDK_ACCEL_OPC_DUALCAST:
	case SPDK_ACCEL_OPC_COMPARE:
	case SPDK_ACCEL_OPC_CRC32C:
	case SPDK_ACCEL_OPC_COPY_CRC32C:
		return true;
	case SPDK_ACCEL_OPC_DIF_VERIFY:
	case SPDK_ACCEL_OPC_DIF_GENERATE_COPY:
	case SPDK_ACCEL_OPC_DIF_VERIFY_COPY:
	/* In theory, DIX Generate could work without the IOMMU, but the IOMMU
	 * is required for consistency with the other DIF operations.
	 */
	case SPDK_ACCEL_OPC_DIX_GENERATE:
	case SPDK_ACCEL_OPC_DIX_VERIFY:
		/* Supported only if the IOMMU is enabled */
		return spdk_iommu_is_enabled();
	default:
		return false;
	}
}

static int accel_dsa_init(void);
static void accel_dsa_exit(void *ctx);
static void accel_dsa_write_config_json(struct spdk_json_write_ctx *w);

static struct spdk_accel_module_if g_dsa_module = {
	.module_init		= accel_dsa_init,
	.module_fini		= accel_dsa_exit,
	.write_config_json	= accel_dsa_write_config_json,
	.get_ctx_size		= accel_dsa_get_ctx_size,
	.name			= "dsa",
	.supports_opcode	= dsa_supports_opcode,
	.get_io_channel		= dsa_get_io_channel,
	.submit_tasks		= dsa_submit_task,
};

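/* I/O channel constructor: bind the channel to a DSA device on the local
 * socket, start the completion poller, and set up an iobuf channel for
 * temporary metadata buffers.
 */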
static int
dsa_create_cb(void *io_device, void *ctx_buf)
{
	struct idxd_io_channel *chan = ctx_buf;
	struct idxd_device *dsa;
	int rc;

	dsa = idxd_select_device(chan);
	if (dsa == NULL) {
		SPDK_ERRLOG("Failed to get an idxd channel\n");
		return -EINVAL;
	}

	chan->dev = dsa;
	chan->poller = SPDK_POLLER_REGISTER(idxd_poll, chan, 0);
	STAILQ_INIT(&chan->queued_tasks);
	chan->num_outstanding = 0;
	chan->state = IDXD_CHANNEL_ACTIVE;
	rc = spdk_iobuf_channel_init(&chan->iobuf, "accel_dsa",
				     ACCEL_DSA_MD_IOBUF_SMALL_CACHE_SIZE,
				     ACCEL_DSA_MD_IOBUF_LARGE_CACHE_SIZE);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to create an iobuf channel in accel dsa\n");
		/* Release the resources acquired above before failing */
		spdk_poller_unregister(&chan->poller);
		spdk_idxd_put_channel(chan->chan);
		return -ENOMEM;
	}

	return 0;
}

static void
dsa_destroy_cb(void *io_device, void *ctx_buf)
{
	struct idxd_io_channel *chan = ctx_buf;

	spdk_iobuf_channel_fini(&chan->iobuf);
	spdk_poller_unregister(&chan->poller);
	spdk_idxd_put_channel(chan->chan);
}

static struct spdk_io_channel *
dsa_get_io_channel(void)
{
	return spdk_get_io_channel(&g_dsa_module);
}

static void
attach_cb(void *cb_ctx, struct spdk_idxd_device *idxd)
{
	struct idxd_device *dev;

	dev = calloc(1, sizeof(*dev));
	if (dev == NULL) {
		SPDK_ERRLOG("Failed to allocate device struct\n");
		return;
	}

	dev->dsa = idxd;
	if (g_next_dev == NULL) {
		g_next_dev = dev;
	}

	TAILQ_INSERT_TAIL(&g_dsa_devices, dev, tailq);
	g_num_devices++;
}

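/* Public entry point (declared in accel_dsa.h) that enables the module and
 * registers it with the accel framework; typically invoked via the
 * "dsa_scan_accel_module" RPC, which accel_dsa_write_config_json() below
 * replays on config load.
 */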
int
accel_dsa_enable_probe(bool kernel_mode)
{
	int rc;

	if (g_dsa_enable) {
		return -EALREADY;
	}

	rc = spdk_idxd_set_config(kernel_mode);
	if (rc != 0) {
		return rc;
	}

	spdk_accel_module_list_add(&g_dsa_module);
	g_kernel_mode = kernel_mode;
	g_dsa_enable = true;

	return 0;
}

static bool
probe_cb(void *cb_ctx, struct spdk_pci_device *dev)
{
	if (dev->id.device_id == PCI_DEVICE_ID_INTEL_DSA) {
		return true;
	}

	return false;
}

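/* Module initialization: probe and attach all available DSA devices, then
 * register the io_device. Fails with -ENODEV when probing found no devices.
 */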
static int
accel_dsa_init(void)
{
	int rc;

	if (!g_dsa_enable) {
		return -EINVAL;
	}

	if (spdk_idxd_probe(NULL, attach_cb, probe_cb) != 0) {
		SPDK_ERRLOG("spdk_idxd_probe() failed\n");
		return -EINVAL;
	}

	if (TAILQ_EMPTY(&g_dsa_devices)) {
		return -ENODEV;
	}

	rc = spdk_iobuf_register_module("accel_dsa");
	if (rc != 0) {
		SPDK_ERRLOG("Failed to register accel_dsa iobuf module\n");
		return rc;
	}

	g_dsa_initialized = true;
	spdk_io_device_register(&g_dsa_module, dsa_create_cb, dsa_destroy_cb,
				sizeof(struct idxd_io_channel), "dsa_accel_module");
	return 0;
}

static void
accel_dsa_exit(void *ctx)
{
	struct idxd_device *dev;

	if (g_dsa_initialized) {
		spdk_io_device_unregister(&g_dsa_module, NULL);
		g_dsa_initialized = false;
	}

	while (!TAILQ_EMPTY(&g_dsa_devices)) {
		dev = TAILQ_FIRST(&g_dsa_devices);
		TAILQ_REMOVE(&g_dsa_devices, dev, tailq);
		spdk_idxd_detach(dev->dsa);
		free(dev);
	}

	spdk_accel_module_finish();
}

static void
accel_dsa_write_config_json(struct spdk_json_write_ctx *w)
{
	if (g_dsa_enable) {
		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "method", "dsa_scan_accel_module");
		spdk_json_write_named_object_begin(w, "params");
		spdk_json_write_named_bool(w, "config_kernel_mode", g_kernel_mode);
		spdk_json_write_object_end(w);
		spdk_json_write_object_end(w);
	}
}

SPDK_TRACE_REGISTER_FN(dsa_trace, "dsa", TRACE_GROUP_ACCEL_DSA)
{
	spdk_trace_register_description("DSA_OP_SUBMIT", TRACE_ACCEL_DSA_OP_SUBMIT, OWNER_TYPE_NONE,
					OBJECT_NONE, 0, SPDK_TRACE_ARG_TYPE_INT, "count");
	spdk_trace_register_description("DSA_OP_COMPLETE", TRACE_ACCEL_DSA_OP_COMPLETE, OWNER_TYPE_NONE,
					OBJECT_NONE, 0, SPDK_TRACE_ARG_TYPE_INT, "count");
}

SPDK_LOG_REGISTER_COMPONENT(accel_dsa)