/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/env.h"
#include "spdk/util.h"
#include "spdk/memory.h"
#include "spdk/likely.h"

#include "spdk/log.h"
#include "spdk_internal/idxd.h"

#include "idxd.h"

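/*
 * Per-device state for the user-space (PCI) IDXD implementation: the generic
 * spdk_idxd_device plus the PCI handle, the mapped BAR0 register base, and the
 * capability/table-offset values read back from the device during configuration.
 */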
struct spdk_user_idxd_device {
	struct spdk_idxd_device	idxd;
	struct spdk_pci_device	*device;
	int			sock_id;
	struct idxd_registers	registers;
	void			*reg_base;
	uint32_t		wqcfg_offset;
	uint32_t		grpcfg_offset;
	uint32_t		ims_offset;
	uint32_t		msix_perm_offset;
	uint32_t		perfmon_offset;
};

typedef bool (*spdk_idxd_probe_cb)(void *cb_ctx, struct spdk_pci_device *pci_dev);

#define __user_idxd(idxd) ((struct spdk_user_idxd_device *)(idxd))

pthread_mutex_t	g_driver_lock = PTHREAD_MUTEX_INITIALIZER;
static struct device_config g_user_dev_cfg = {};

static struct spdk_idxd_device *idxd_attach(struct spdk_pci_device *device);

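/*
 * MMIO accessors for the device registers in BAR0. All register reads and
 * writes below go through reg_base, which is set up in idxd_map_pci_bars().
 */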
static uint32_t
_idxd_read_4(struct spdk_idxd_device *idxd, uint32_t offset)
{
	struct spdk_user_idxd_device *user_idxd = __user_idxd(idxd);

	return spdk_mmio_read_4((uint32_t *)(user_idxd->reg_base + offset));
}

static void
_idxd_write_4(struct spdk_idxd_device *idxd, uint32_t offset, uint32_t value)
{
	struct spdk_user_idxd_device *user_idxd = __user_idxd(idxd);

	spdk_mmio_write_4((uint32_t *)(user_idxd->reg_base + offset), value);
}

static uint64_t
_idxd_read_8(struct spdk_idxd_device *idxd, uint32_t offset)
{
	struct spdk_user_idxd_device *user_idxd = __user_idxd(idxd);

	return spdk_mmio_read_8((uint64_t *)(user_idxd->reg_base + offset));
}

static uint64_t
idxd_read_8(struct spdk_idxd_device *idxd, void *portal, uint32_t offset)
{
	return _idxd_read_8(idxd, offset);
}

static void
_idxd_write_8(struct spdk_idxd_device *idxd, uint32_t offset, uint64_t value)
{
	struct spdk_user_idxd_device *user_idxd = __user_idxd(idxd);

	spdk_mmio_write_8((uint64_t *)(user_idxd->reg_base + offset), value);
}

static void
user_idxd_set_config(struct device_config *dev_cfg, uint32_t config_num)
{
	g_user_dev_cfg = *dev_cfg;
}

/* Used for control commands, not for descriptor submission. */
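/* Polls the command status register until the active bit clears, sleeping 1us
 * per iteration, so the timeout is roughly _timeout microseconds. */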
static int
idxd_wait_cmd(struct spdk_idxd_device *idxd, int _timeout)
{
	uint32_t timeout = _timeout;
	union idxd_cmdsts_reg cmd_status = {};

	cmd_status.raw = _idxd_read_4(idxd, IDXD_CMDSTS_OFFSET);
	while (cmd_status.active && --timeout) {
		usleep(1);
		cmd_status.raw = _idxd_read_4(idxd, IDXD_CMDSTS_OFFSET);
	}

	/* Check for timeout */
	if (timeout == 0 && cmd_status.active) {
		SPDK_ERRLOG("Command timeout, waited %d\n", _timeout);
		return -EBUSY;
	}

	/* Check for error */
	if (cmd_status.err) {
		SPDK_ERRLOG("Command status reg reports error 0x%x\n", cmd_status.err);
		return -EINVAL;
	}

	return 0;
}

static int
idxd_unmap_pci_bar(struct spdk_idxd_device *idxd, int bar)
{
	int rc = 0;
	void *addr = NULL;
	struct spdk_user_idxd_device *user_idxd = __user_idxd(idxd);

	if (bar == IDXD_MMIO_BAR) {
		addr = (void *)user_idxd->reg_base;
	} else if (bar == IDXD_WQ_BAR) {
		addr = (void *)idxd->portals;
	}

	if (addr) {
		rc = spdk_pci_device_unmap_bar(user_idxd->device, 0, addr);
	}
	return rc;
}

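/*
 * Map the two PCI BARs the driver uses: IDXD_MMIO_BAR holds the device
 * registers (reg_base) and IDXD_WQ_BAR holds the work queue portals that
 * descriptors are submitted through.
 */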
static int
idxd_map_pci_bars(struct spdk_idxd_device *idxd)
{
	int rc;
	void *addr;
	uint64_t phys_addr, size;
	struct spdk_user_idxd_device *user_idxd = __user_idxd(idxd);

	rc = spdk_pci_device_map_bar(user_idxd->device, IDXD_MMIO_BAR, &addr, &phys_addr, &size);
	if (rc != 0 || addr == NULL) {
		SPDK_ERRLOG("spdk_pci_device_map_bar failed with error code %d\n", rc);
		return -1;
	}
	user_idxd->reg_base = addr;

	rc = spdk_pci_device_map_bar(user_idxd->device, IDXD_WQ_BAR, &addr, &phys_addr, &size);
	if (rc != 0 || addr == NULL) {
		SPDK_ERRLOG("spdk_pci_device_map_bar failed with error code %d\n", rc);
		rc = idxd_unmap_pci_bar(idxd, IDXD_MMIO_BAR);
		if (rc) {
			SPDK_ERRLOG("unable to unmap MMIO bar\n");
		}
		return -EINVAL;
	}
	idxd->portals = addr;

	return 0;
}

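/* Issue a full device reset through the CMD register and poll for completion. */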
static int
idxd_reset_dev(struct spdk_idxd_device *idxd)
{
	int rc;

	_idxd_write_4(idxd, IDXD_CMD_OFFSET, IDXD_RESET_DEVICE << IDXD_CMD_SHIFT);
	rc = idxd_wait_cmd(idxd, IDXD_REGISTER_TIMEOUT_US);
	if (rc < 0) {
		SPDK_ERRLOG("Error resetting device %d\n", rc);
	}

	return rc;
}

/*
 * Build the group config from the device capabilities combined with the
 * requested configuration. Once built, it is written to the device.
 */
static int
idxd_group_config(struct spdk_idxd_device *idxd)
{
	int i;
	uint64_t base_offset;
	struct spdk_user_idxd_device *user_idxd = __user_idxd(idxd);

	assert(g_user_dev_cfg.num_groups <= user_idxd->registers.groupcap.num_groups);
	idxd->groups = calloc(user_idxd->registers.groupcap.num_groups, sizeof(struct idxd_group));
	if (idxd->groups == NULL) {
		SPDK_ERRLOG("Failed to allocate group memory\n");
		return -ENOMEM;
	}

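	/* Spread the configured engines and work queues round-robin across the groups. */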
	assert(g_user_dev_cfg.total_engines <= user_idxd->registers.enginecap.num_engines);
	for (i = 0; i < g_user_dev_cfg.total_engines; i++) {
		idxd->groups[i % g_user_dev_cfg.num_groups].grpcfg.engines |= (1 << i);
	}

	assert(g_user_dev_cfg.total_wqs <= user_idxd->registers.wqcap.num_wqs);
	for (i = 0; i < g_user_dev_cfg.total_wqs; i++) {
		idxd->groups[i % g_user_dev_cfg.num_groups].grpcfg.wqs[0] |= (1 << i);
	}

	for (i = 0; i < g_user_dev_cfg.num_groups; i++) {
		idxd->groups[i].idxd = idxd;
		idxd->groups[i].id = i;

		/* Divide BW tokens evenly */
		idxd->groups[i].grpcfg.flags.tokens_allowed =
			user_idxd->registers.groupcap.total_tokens / g_user_dev_cfg.num_groups;
	}

	/*
	 * Now write the group config to the device for all groups. We write
	 * to the max number of groups in order to 0 out the ones we didn't
	 * configure.
	 */
	for (i = 0; i < user_idxd->registers.groupcap.num_groups; i++) {

		base_offset = user_idxd->grpcfg_offset + i * 64;

		/* GRPWQCFG, work queues config */
		_idxd_write_8(idxd, base_offset, idxd->groups[i].grpcfg.wqs[0]);

		/* GRPENGCFG, engine config */
		_idxd_write_8(idxd, base_offset + CFG_ENGINE_OFFSET, idxd->groups[i].grpcfg.engines);

		/* GRPFLAGS, flags config */
		_idxd_write_8(idxd, base_offset + CFG_FLAG_OFFSET, idxd->groups[i].grpcfg.flags.raw);
	}

	return 0;
}

/*
 * Build the work queue (WQ) config from the device capabilities combined
 * with the requested configuration. Once built, it is written to the device.
 */
static int
idxd_wq_config(struct spdk_user_idxd_device *user_idxd)
{
	int i, j;
	struct idxd_wq *queue;
	struct spdk_idxd_device *idxd = &user_idxd->idxd;
	uint32_t wq_size = user_idxd->registers.wqcap.total_wq_size / g_user_dev_cfg.total_wqs;

	SPDK_NOTICELOG("Total ring slots available space 0x%x, so per work queue is 0x%x\n",
		       user_idxd->registers.wqcap.total_wq_size, wq_size);
	assert(g_user_dev_cfg.total_wqs <= IDXD_MAX_QUEUES);
	assert(g_user_dev_cfg.total_wqs <= user_idxd->registers.wqcap.num_wqs);
	assert(LOG2_WQ_MAX_BATCH <= user_idxd->registers.gencap.max_batch_shift);
	assert(LOG2_WQ_MAX_XFER <= user_idxd->registers.gencap.max_xfer_shift);

	idxd->queues = calloc(1, user_idxd->registers.wqcap.num_wqs * sizeof(struct idxd_wq));
	if (idxd->queues == NULL) {
		SPDK_ERRLOG("Failed to allocate queue memory\n");
		return -ENOMEM;
	}

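	/* Each configured WQ is dedicated mode with an equal share of the device's
	 * total WQ size; group assignment is round-robin to match idxd_group_config(). */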
	for (i = 0; i < g_user_dev_cfg.total_wqs; i++) {
		queue = &user_idxd->idxd.queues[i];
		queue->wqcfg.wq_size = wq_size;
		queue->wqcfg.mode = WQ_MODE_DEDICATED;
		queue->wqcfg.max_batch_shift = LOG2_WQ_MAX_BATCH;
		queue->wqcfg.max_xfer_shift = LOG2_WQ_MAX_XFER;
		queue->wqcfg.wq_state = WQ_ENABLED;
		queue->wqcfg.priority = WQ_PRIORITY_1;

		/* Not part of the config struct */
		queue->idxd = &user_idxd->idxd;
		queue->group = &idxd->groups[i % g_user_dev_cfg.num_groups];
	}

	/*
	 * Now write the work queue config to the device for all wq space
	 */
	for (i = 0; i < user_idxd->registers.wqcap.num_wqs; i++) {
		queue = &idxd->queues[i];
		for (j = 0; j < WQCFG_NUM_DWORDS; j++) {
			_idxd_write_4(idxd, user_idxd->wqcfg_offset + i * 32 + j * 4,
				      queue->wqcfg.raw[j]);
		}
	}

	return 0;
}

static int
idxd_device_configure(struct spdk_user_idxd_device *user_idxd)
{
	int i, rc = 0;
	union idxd_offsets_register offsets_reg;
	union idxd_genstatus_register genstatus_reg;
	struct spdk_idxd_device *idxd = &user_idxd->idxd;

	/*
	 * Map BAR0 and BAR2
	 */
	rc = idxd_map_pci_bars(idxd);
	if (rc) {
		return rc;
	}

	/*
	 * Reset the device
	 */
	rc = idxd_reset_dev(idxd);
	if (rc) {
		goto err_reset;
	}

	/*
	 * Read in config registers
	 */
	user_idxd->registers.version = _idxd_read_4(idxd, IDXD_VERSION_OFFSET);
	user_idxd->registers.gencap.raw = _idxd_read_8(idxd, IDXD_GENCAP_OFFSET);
	user_idxd->registers.wqcap.raw = _idxd_read_8(idxd, IDXD_WQCAP_OFFSET);
	user_idxd->registers.groupcap.raw = _idxd_read_8(idxd, IDXD_GRPCAP_OFFSET);
	user_idxd->registers.enginecap.raw = _idxd_read_8(idxd, IDXD_ENGCAP_OFFSET);
	for (i = 0; i < IDXD_OPCAP_WORDS; i++) {
		user_idxd->registers.opcap.raw[i] =
			_idxd_read_8(idxd, i * sizeof(uint64_t) + IDXD_OPCAP_OFFSET);
	}
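	/* The table offset register reports where the GRPCFG, WQCFG, IMS, MSI-X
	 * permission and perfmon tables live within BAR0, in units of
	 * IDXD_TABLE_OFFSET_MULT bytes. */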
	offsets_reg.raw[0] = _idxd_read_8(idxd, IDXD_TABLE_OFFSET);
	offsets_reg.raw[1] = _idxd_read_8(idxd, IDXD_TABLE_OFFSET + sizeof(uint64_t));
	user_idxd->grpcfg_offset = offsets_reg.grpcfg * IDXD_TABLE_OFFSET_MULT;
	user_idxd->wqcfg_offset = offsets_reg.wqcfg * IDXD_TABLE_OFFSET_MULT;
	user_idxd->ims_offset = offsets_reg.ims * IDXD_TABLE_OFFSET_MULT;
	user_idxd->msix_perm_offset = offsets_reg.msix_perm * IDXD_TABLE_OFFSET_MULT;
	user_idxd->perfmon_offset = offsets_reg.perfmon * IDXD_TABLE_OFFSET_MULT;

	/*
	 * Configure groups and work queues.
	 */
	rc = idxd_group_config(idxd);
	if (rc) {
		goto err_group_cfg;
	}

	rc = idxd_wq_config(user_idxd);
	if (rc) {
		goto err_wq_cfg;
	}

	/*
	 * Enable the device
	 */
	genstatus_reg.raw = _idxd_read_4(idxd, IDXD_GENSTATUS_OFFSET);
	assert(genstatus_reg.state == IDXD_DEVICE_STATE_DISABLED);

	_idxd_write_4(idxd, IDXD_CMD_OFFSET, IDXD_ENABLE_DEV << IDXD_CMD_SHIFT);
	rc = idxd_wait_cmd(idxd, IDXD_REGISTER_TIMEOUT_US);
	genstatus_reg.raw = _idxd_read_4(idxd, IDXD_GENSTATUS_OFFSET);
	if ((rc < 0) || (genstatus_reg.state != IDXD_DEVICE_STATE_ENABLED)) {
		rc = -EINVAL;
		SPDK_ERRLOG("Error enabling device %d\n", rc);
		goto err_device_enable;
	}

	genstatus_reg.raw = spdk_mmio_read_4((uint32_t *)(user_idxd->reg_base + IDXD_GENSTATUS_OFFSET));
	assert(genstatus_reg.state == IDXD_DEVICE_STATE_ENABLED);

	/*
	 * Enable the work queues that we've configured
	 */
	for (i = 0; i < g_user_dev_cfg.total_wqs; i++) {
		_idxd_write_4(idxd, IDXD_CMD_OFFSET,
			      (IDXD_ENABLE_WQ << IDXD_CMD_SHIFT) | i);
		rc = idxd_wait_cmd(idxd, IDXD_REGISTER_TIMEOUT_US);
		if (rc < 0) {
			SPDK_ERRLOG("Error enabling work queues %d\n", rc);
			goto err_wq_enable;
		}
	}

	if ((rc == 0) && (genstatus_reg.state == IDXD_DEVICE_STATE_ENABLED)) {
		SPDK_NOTICELOG("Device enabled, version 0x%x gencap: 0x%lx\n",
			       user_idxd->registers.version,
			       user_idxd->registers.gencap.raw);
	}

	return rc;
err_wq_enable:
err_device_enable:
	free(idxd->queues);
err_wq_cfg:
	free(idxd->groups);
err_group_cfg:
err_reset:
	idxd_unmap_pci_bar(idxd, IDXD_MMIO_BAR);
	idxd_unmap_pci_bar(idxd, IDXD_WQ_BAR);

	return rc;
}

static void
user_idxd_device_destruct(struct spdk_idxd_device *idxd)
{
	struct spdk_user_idxd_device *user_idxd = __user_idxd(idxd);

	idxd_unmap_pci_bar(idxd, IDXD_MMIO_BAR);
	idxd_unmap_pci_bar(idxd, IDXD_WQ_BAR);
	free(idxd->groups);
	free(idxd->queues);

	spdk_pci_device_detach(user_idxd->device);
	free(user_idxd);
}

struct idxd_enum_ctx {
	spdk_idxd_probe_cb probe_cb;
	spdk_idxd_attach_cb attach_cb;
	void *cb_ctx;
};

/* This function must only be called while holding g_driver_lock */
static int
idxd_enum_cb(void *ctx, struct spdk_pci_device *pci_dev)
{
	struct idxd_enum_ctx *enum_ctx = ctx;
	struct spdk_idxd_device *idxd;

	if (enum_ctx->probe_cb(enum_ctx->cb_ctx, pci_dev)) {
		idxd = idxd_attach(pci_dev);
		if (idxd == NULL) {
			SPDK_ERRLOG("idxd_attach() failed\n");
			return -EINVAL;
		}

		enum_ctx->attach_cb(enum_ctx->cb_ctx, idxd);
	}

	return 0;
}

static bool
probe_cb(void *cb_ctx, struct spdk_pci_device *pci_dev)
{
	struct spdk_pci_addr pci_addr = spdk_pci_device_get_addr(pci_dev);

	SPDK_NOTICELOG(
		" Found matching device at %04x:%02x:%02x.%x vendor:0x%04x device:0x%04x\n",
		pci_addr.domain,
		pci_addr.bus,
		pci_addr.dev,
		pci_addr.func,
		spdk_pci_device_get_vendor_id(pci_dev),
		spdk_pci_device_get_device_id(pci_dev));

	/* Claim the device in case of a conflict with another process. */
	if (spdk_pci_device_claim(pci_dev) < 0) {
		return false;
	}

	return true;
}

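/*
 * Enumerate all IDXD PCI devices under g_driver_lock; devices accepted by
 * probe_cb() are attached and reported to the caller through attach_cb().
 */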
static int
user_idxd_probe(void *cb_ctx, spdk_idxd_attach_cb attach_cb)
{
	int rc;
	struct idxd_enum_ctx enum_ctx;

	enum_ctx.probe_cb = probe_cb;
	enum_ctx.attach_cb = attach_cb;
	enum_ctx.cb_ctx = cb_ctx;

	pthread_mutex_lock(&g_driver_lock);
	rc = spdk_pci_enumerate(spdk_pci_idxd_get_driver(), idxd_enum_cb, &enum_ctx);
	pthread_mutex_unlock(&g_driver_lock);

	return rc;
}

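/* Each work queue owns a PORTAL_SIZE region within the BAR2 mapping; descriptors
 * for wq_id are submitted through that portal. */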
static char *
user_idxd_portal_get_addr(struct spdk_idxd_device *idxd)
{
	return (char *)idxd->portals + idxd->wq_id * PORTAL_SIZE;
}

static bool
user_idxd_nop_check(struct spdk_idxd_device *idxd)
{
	struct spdk_user_idxd_device *user_idxd = __user_idxd(idxd);

	/* TODO: temp workaround for simulator.  Remove this function when fixed or w/silicon. */
	if (user_idxd->registers.gencap.raw == 0x1833f011f) {
		return true;
	}

	return false;
}

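/* Operation table that hooks this user-space implementation into the common
 * idxd library (see SPDK_IDXD_IMPL_REGISTER at the bottom of this file). */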
static struct spdk_idxd_impl g_user_idxd_impl = {
	.name			= "user",
	.set_config		= user_idxd_set_config,
	.probe			= user_idxd_probe,
	.destruct		= user_idxd_device_destruct,
	.read_8			= idxd_read_8,
	.portal_get_addr	= user_idxd_portal_get_addr,
	.nop_check		= user_idxd_nop_check,
};

/* Caller must hold g_driver_lock */
static struct spdk_idxd_device *
idxd_attach(struct spdk_pci_device *device)
{
	struct spdk_user_idxd_device *user_idxd;
	struct spdk_idxd_device *idxd;
	uint32_t cmd_reg;
	int rc;

	user_idxd = calloc(1, sizeof(struct spdk_user_idxd_device));
	if (user_idxd == NULL) {
		SPDK_ERRLOG("Failed to allocate memory for user_idxd device.\n");
		return NULL;
	}

	idxd = &user_idxd->idxd;
	user_idxd->device = device;
	idxd->impl = &g_user_idxd_impl;
	pthread_mutex_init(&idxd->num_channels_lock, NULL);

	/* Enable PCI busmaster. */
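	/* Offset 4 is the PCI Command register; bit 2 (0x4) is Bus Master Enable,
	 * which the device needs in order to DMA to/from host memory. */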
	spdk_pci_device_cfg_read32(device, &cmd_reg, 4);
	cmd_reg |= 0x4;
	spdk_pci_device_cfg_write32(device, cmd_reg, 4);

	rc = idxd_device_configure(user_idxd);
	if (rc) {
		goto err;
	}

	return idxd;
err:
	user_idxd_device_destruct(idxd);
	return NULL;
}

SPDK_IDXD_IMPL_REGISTER(user, &g_user_idxd_impl);