/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/env.h"
#include "spdk/util.h"
#include "spdk/memory.h"
#include "spdk/likely.h"

#include "spdk/log.h"
#include "spdk_internal/idxd.h"

#include "idxd.h"

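/*
 * Per-device state for the user-space (PCI) IDXD driver. The embedded
 * struct spdk_idxd_device must remain the first member: __user_idxd()
 * recovers this wrapper by casting the common device pointer.
 */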
struct spdk_user_idxd_device {
	struct spdk_idxd_device	idxd;
	struct spdk_pci_device	*device;
	int			sock_id;
	struct idxd_registers	registers;
	void			*reg_base;
	uint32_t		wqcfg_offset;
	uint32_t		grpcfg_offset;
	uint32_t		ims_offset;
	uint32_t		msix_perm_offset;
	uint32_t		perfmon_offset;
};

typedef bool (*spdk_idxd_probe_cb)(void *cb_ctx, struct spdk_pci_device *pci_dev);

#define __user_idxd(idxd)	((struct spdk_user_idxd_device *)(idxd))

pthread_mutex_t	g_driver_lock = PTHREAD_MUTEX_INITIALIZER;
static struct device_config g_user_dev_cfg = {};

static struct spdk_idxd_device *idxd_attach(struct spdk_pci_device *device);

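/*
 * MMIO register accessors. reg_base is the mapped MMIO BAR, so these
 * helpers read and write device registers at the given byte offset.
 */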
static uint32_t
_idxd_read_4(struct spdk_idxd_device *idxd, uint32_t offset)
{
	struct spdk_user_idxd_device *user_idxd = __user_idxd(idxd);

	return spdk_mmio_read_4((uint32_t *)(user_idxd->reg_base + offset));
}

static void
_idxd_write_4(struct spdk_idxd_device *idxd, uint32_t offset, uint32_t value)
{
	struct spdk_user_idxd_device *user_idxd = __user_idxd(idxd);

	spdk_mmio_write_4((uint32_t *)(user_idxd->reg_base + offset), value);
}

static uint64_t
_idxd_read_8(struct spdk_idxd_device *idxd, uint32_t offset)
{
	struct spdk_user_idxd_device *user_idxd = __user_idxd(idxd);

	return spdk_mmio_read_8((uint64_t *)(user_idxd->reg_base + offset));
}

static void
_idxd_write_8(struct spdk_idxd_device *idxd, uint32_t offset, uint64_t value)
{
	struct spdk_user_idxd_device *user_idxd = __user_idxd(idxd);

	spdk_mmio_write_8((uint64_t *)(user_idxd->reg_base + offset), value);
}

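/*
 * Record the requested device configuration. Only a single global
 * config is kept; the config_num argument is currently unused.
 */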
static void
user_idxd_set_config(struct device_config *dev_cfg, uint32_t config_num)
{
	g_user_dev_cfg = *dev_cfg;
}

/* Used for control commands, not for descriptor submission. */
static int
idxd_wait_cmd(struct spdk_idxd_device *idxd, int _timeout)
{
	uint32_t timeout = _timeout;
	union idxd_cmdsts_reg cmd_status = {};

	cmd_status.raw = _idxd_read_4(idxd, IDXD_CMDSTS_OFFSET);
	while (cmd_status.active && --timeout) {
		usleep(1);
		cmd_status.raw = _idxd_read_4(idxd, IDXD_CMDSTS_OFFSET);
	}

	/* Check for timeout */
	if (timeout == 0 && cmd_status.active) {
		SPDK_ERRLOG("Command timeout, waited %d\n", _timeout);
		return -EBUSY;
	}

	/* Check for error */
	if (cmd_status.err) {
		SPDK_ERRLOG("Command status reg reports error 0x%x\n", cmd_status.err);
		return -EINVAL;
	}

	return 0;
}

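/* Unmap a previously mapped BAR; a no-op for BARs that were never mapped. */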
static int
idxd_unmap_pci_bar(struct spdk_idxd_device *idxd, int bar)
{
	int rc = 0;
	void *addr = NULL;
	struct spdk_user_idxd_device *user_idxd = __user_idxd(idxd);

	if (bar == IDXD_MMIO_BAR) {
		addr = (void *)user_idxd->reg_base;
	} else if (bar == IDXD_WQ_BAR) {
		addr = (void *)idxd->portals;
	}

	if (addr) {
		rc = spdk_pci_device_unmap_bar(user_idxd->device, bar, addr);
	}
	return rc;
}

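/*
 * Map the two BARs the driver uses: the MMIO BAR for device control
 * registers and the WQ BAR for the work queue submission portals.
 */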
static int
idxd_map_pci_bars(struct spdk_idxd_device *idxd)
{
	int rc;
	void *addr;
	uint64_t phys_addr, size;
	struct spdk_user_idxd_device *user_idxd = __user_idxd(idxd);

	rc = spdk_pci_device_map_bar(user_idxd->device, IDXD_MMIO_BAR, &addr, &phys_addr, &size);
	if (rc != 0 || addr == NULL) {
		SPDK_ERRLOG("spdk_pci_device_map_bar failed with error code %d\n", rc);
		return -EINVAL;
	}
	user_idxd->reg_base = addr;

	rc = spdk_pci_device_map_bar(user_idxd->device, IDXD_WQ_BAR, &addr, &phys_addr, &size);
	if (rc != 0 || addr == NULL) {
		SPDK_ERRLOG("spdk_pci_device_map_bar failed with error code %d\n", rc);
		rc = idxd_unmap_pci_bar(idxd, IDXD_MMIO_BAR);
		if (rc) {
			SPDK_ERRLOG("unable to unmap MMIO bar\n");
		}
		return -EINVAL;
	}
	idxd->portals = addr;

	return 0;
}

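/* Issue a device reset command and wait for it to complete. */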
static int
idxd_reset_dev(struct spdk_idxd_device *idxd)
{
	int rc;

	_idxd_write_4(idxd, IDXD_CMD_OFFSET, IDXD_RESET_DEVICE << IDXD_CMD_SHIFT);
	rc = idxd_wait_cmd(idxd, IDXD_REGISTER_TIMEOUT_US);
	if (rc < 0) {
		SPDK_ERRLOG("Error resetting device %d\n", rc);
	}

	return rc;
}

/*
 * Build group config based on getting info from the device combined
 * with the defined configuration. Once built, it is written to the
 * device.
 */
static int
idxd_group_config(struct spdk_idxd_device *idxd)
{
	int i;
	uint64_t base_offset;
	struct spdk_user_idxd_device *user_idxd = __user_idxd(idxd);

	assert(g_user_dev_cfg.num_groups <= user_idxd->registers.groupcap.num_groups);
	idxd->groups = calloc(user_idxd->registers.groupcap.num_groups, sizeof(struct idxd_group));
	if (idxd->groups == NULL) {
		SPDK_ERRLOG("Failed to allocate group memory\n");
		return -ENOMEM;
	}

	/* Spread the configured engines round-robin across the groups. */
	assert(g_user_dev_cfg.total_engines <= user_idxd->registers.enginecap.num_engines);
	for (i = 0; i < g_user_dev_cfg.total_engines; i++) {
		idxd->groups[i % g_user_dev_cfg.num_groups].grpcfg.engines |= (1 << i);
	}

	/* Likewise for the configured work queues. */
	assert(g_user_dev_cfg.total_wqs <= user_idxd->registers.wqcap.num_wqs);
	for (i = 0; i < g_user_dev_cfg.total_wqs; i++) {
		idxd->groups[i % g_user_dev_cfg.num_groups].grpcfg.wqs[0] |= (1 << i);
	}

	for (i = 0; i < g_user_dev_cfg.num_groups; i++) {
		idxd->groups[i].idxd = idxd;
		idxd->groups[i].id = i;

		/* Divide BW tokens evenly */
		idxd->groups[i].grpcfg.flags.tokens_allowed =
			user_idxd->registers.groupcap.total_tokens / g_user_dev_cfg.num_groups;
	}

	/*
	 * Now write the group config to the device for all groups. We write
	 * to the max number of groups in order to 0 out the ones we didn't
	 * configure. Each GRPCFG table entry is 64 bytes.
	 */
	for (i = 0; i < user_idxd->registers.groupcap.num_groups; i++) {
		base_offset = user_idxd->grpcfg_offset + i * 64;

		/* GRPWQCFG, work queues config */
		_idxd_write_8(idxd, base_offset, idxd->groups[i].grpcfg.wqs[0]);

		/* GRPENGCFG, engine config */
		_idxd_write_8(idxd, base_offset + CFG_ENGINE_OFFSET, idxd->groups[i].grpcfg.engines);

		/* GRPFLAGS, flags config */
		_idxd_write_8(idxd, base_offset + CFG_FLAG_OFFSET, idxd->groups[i].grpcfg.flags.raw);
	}

	return 0;
}

/*
 * Build work queue (WQ) config based on getting info from the device combined
 * with the defined configuration. Once built, it is written to the device.
 */
static int
idxd_wq_config(struct spdk_user_idxd_device *user_idxd)
{
	int i, j;
	struct idxd_wq *queue;
	struct spdk_idxd_device *idxd = &user_idxd->idxd;
	uint32_t wq_size = user_idxd->registers.wqcap.total_wq_size / g_user_dev_cfg.total_wqs;

	SPDK_NOTICELOG("Total ring slots available space 0x%x, so per work queue is 0x%x\n",
		       user_idxd->registers.wqcap.total_wq_size, wq_size);
	assert(g_user_dev_cfg.total_wqs <= IDXD_MAX_QUEUES);
	assert(g_user_dev_cfg.total_wqs <= user_idxd->registers.wqcap.num_wqs);
	assert(LOG2_WQ_MAX_BATCH <= user_idxd->registers.gencap.max_batch_shift);
	assert(LOG2_WQ_MAX_XFER <= user_idxd->registers.gencap.max_xfer_shift);

	idxd->total_wq_size = user_idxd->registers.wqcap.total_wq_size;
	idxd->queues = calloc(1, user_idxd->registers.wqcap.num_wqs * sizeof(struct idxd_wq));
	if (idxd->queues == NULL) {
		SPDK_ERRLOG("Failed to allocate queue memory\n");
		return -ENOMEM;
	}

	for (i = 0; i < g_user_dev_cfg.total_wqs; i++) {
		queue = &user_idxd->idxd.queues[i];
		queue->wqcfg.wq_size = wq_size;
		queue->wqcfg.mode = WQ_MODE_DEDICATED;
		queue->wqcfg.max_batch_shift = LOG2_WQ_MAX_BATCH;
		queue->wqcfg.max_xfer_shift = LOG2_WQ_MAX_XFER;
		queue->wqcfg.wq_state = WQ_ENABLED;
		queue->wqcfg.priority = WQ_PRIORITY_1;

		/* Not part of the config struct */
		queue->idxd = &user_idxd->idxd;
		queue->group = &idxd->groups[i % g_user_dev_cfg.num_groups];
	}

	/*
	 * Now write the work queue config to the device for all wq space,
	 * one 32-byte WQCFG table entry (WQCFG_NUM_DWORDS dwords) per WQ.
	 */
	for (i = 0; i < user_idxd->registers.wqcap.num_wqs; i++) {
		queue = &idxd->queues[i];
		for (j = 0; j < WQCFG_NUM_DWORDS; j++) {
			_idxd_write_4(idxd, user_idxd->wqcfg_offset + i * 32 + j * 4,
				      queue->wqcfg.raw[j]);
		}
	}

	return 0;
}

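/*
 * Bring the device from reset to an enabled state: map the BARs, reset
 * the device, read its capability and table-offset registers, program
 * the group and work queue configs, then enable the device and each
 * configured work queue.
 */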
static int
idxd_device_configure(struct spdk_user_idxd_device *user_idxd)
{
	int i, rc = 0;
	union idxd_offsets_register offsets_reg;
	union idxd_genstatus_register genstatus_reg;
	struct spdk_idxd_device *idxd = &user_idxd->idxd;

	/*
	 * Map BAR0 and BAR2
	 */
	rc = idxd_map_pci_bars(idxd);
	if (rc) {
		return rc;
	}

	/*
	 * Reset the device
	 */
	rc = idxd_reset_dev(idxd);
	if (rc) {
		goto err_reset;
	}

	/*
	 * Read in config registers
	 */
	user_idxd->registers.version = _idxd_read_4(idxd, IDXD_VERSION_OFFSET);
	user_idxd->registers.gencap.raw = _idxd_read_8(idxd, IDXD_GENCAP_OFFSET);
	user_idxd->registers.wqcap.raw = _idxd_read_8(idxd, IDXD_WQCAP_OFFSET);
	user_idxd->registers.groupcap.raw = _idxd_read_8(idxd, IDXD_GRPCAP_OFFSET);
	user_idxd->registers.enginecap.raw = _idxd_read_8(idxd, IDXD_ENGCAP_OFFSET);
	for (i = 0; i < IDXD_OPCAP_WORDS; i++) {
		user_idxd->registers.opcap.raw[i] =
			_idxd_read_8(idxd, i * sizeof(uint64_t) + IDXD_OPCAP_OFFSET);
	}
	offsets_reg.raw[0] = _idxd_read_8(idxd, IDXD_TABLE_OFFSET);
	offsets_reg.raw[1] = _idxd_read_8(idxd, IDXD_TABLE_OFFSET + sizeof(uint64_t));
	user_idxd->grpcfg_offset = offsets_reg.grpcfg * IDXD_TABLE_OFFSET_MULT;
	user_idxd->wqcfg_offset = offsets_reg.wqcfg * IDXD_TABLE_OFFSET_MULT;
	user_idxd->ims_offset = offsets_reg.ims * IDXD_TABLE_OFFSET_MULT;
	user_idxd->msix_perm_offset = offsets_reg.msix_perm * IDXD_TABLE_OFFSET_MULT;
	user_idxd->perfmon_offset = offsets_reg.perfmon * IDXD_TABLE_OFFSET_MULT;

	/*
	 * Configure groups and work queues.
	 */
	rc = idxd_group_config(idxd);
	if (rc) {
		goto err_group_cfg;
	}

	rc = idxd_wq_config(user_idxd);
	if (rc) {
		goto err_wq_cfg;
	}

	/*
	 * Enable the device
	 */
	genstatus_reg.raw = _idxd_read_4(idxd, IDXD_GENSTATUS_OFFSET);
	assert(genstatus_reg.state == IDXD_DEVICE_STATE_DISABLED);

	_idxd_write_4(idxd, IDXD_CMD_OFFSET, IDXD_ENABLE_DEV << IDXD_CMD_SHIFT);
	rc = idxd_wait_cmd(idxd, IDXD_REGISTER_TIMEOUT_US);
	genstatus_reg.raw = _idxd_read_4(idxd, IDXD_GENSTATUS_OFFSET);
	if ((rc < 0) || (genstatus_reg.state != IDXD_DEVICE_STATE_ENABLED)) {
		rc = -EINVAL;
		SPDK_ERRLOG("Error enabling device %d\n", rc);
		goto err_device_enable;
	}

	genstatus_reg.raw = _idxd_read_4(idxd, IDXD_GENSTATUS_OFFSET);
	assert(genstatus_reg.state == IDXD_DEVICE_STATE_ENABLED);

	/*
	 * Enable the work queues that we've configured
	 */
	for (i = 0; i < g_user_dev_cfg.total_wqs; i++) {
		_idxd_write_4(idxd, IDXD_CMD_OFFSET,
			      (IDXD_ENABLE_WQ << IDXD_CMD_SHIFT) | i);
		rc = idxd_wait_cmd(idxd, IDXD_REGISTER_TIMEOUT_US);
		if (rc < 0) {
			SPDK_ERRLOG("Error enabling work queue %d: %d\n", i, rc);
			goto err_wq_enable;
		}
	}

	if ((rc == 0) && (genstatus_reg.state == IDXD_DEVICE_STATE_ENABLED)) {
		SPDK_NOTICELOG("Device enabled, version 0x%x gencap: 0x%lx\n",
			       user_idxd->registers.version,
			       user_idxd->registers.gencap.raw);
	}

	return rc;
err_wq_enable:
err_device_enable:
	/* Free and clear what was set up; idxd_attach() invokes the
	 * destruct path on failure, which must not free or unmap twice.
	 */
	free(idxd->queues);
	idxd->queues = NULL;
err_wq_cfg:
	free(idxd->groups);
	idxd->groups = NULL;
err_group_cfg:
err_reset:
	idxd_unmap_pci_bar(idxd, IDXD_MMIO_BAR);
	idxd_unmap_pci_bar(idxd, IDXD_WQ_BAR);
	user_idxd->reg_base = NULL;
	idxd->portals = NULL;

	return rc;
}

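/* Release everything acquired in idxd_attach()/idxd_device_configure(). */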
static void
user_idxd_device_destruct(struct spdk_idxd_device *idxd)
{
	struct spdk_user_idxd_device *user_idxd = __user_idxd(idxd);

	idxd_unmap_pci_bar(idxd, IDXD_MMIO_BAR);
	idxd_unmap_pci_bar(idxd, IDXD_WQ_BAR);
	free(idxd->groups);
	free(idxd->queues);

	spdk_pci_device_detach(user_idxd->device);
	free(user_idxd);
}

struct idxd_enum_ctx {
	spdk_idxd_probe_cb probe_cb;
	spdk_idxd_attach_cb attach_cb;
	void *cb_ctx;
};

/* This function must only be called while holding g_driver_lock */
static int
idxd_enum_cb(void *ctx, struct spdk_pci_device *pci_dev)
{
	struct idxd_enum_ctx *enum_ctx = ctx;
	struct spdk_idxd_device *idxd;

	if (enum_ctx->probe_cb(enum_ctx->cb_ctx, pci_dev)) {
		idxd = idxd_attach(pci_dev);
		if (idxd == NULL) {
			SPDK_ERRLOG("idxd_attach() failed\n");
			return -EINVAL;
		}

		enum_ctx->attach_cb(enum_ctx->cb_ctx, idxd);
	}

	return 0;
}

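/*
 * Default probe callback: log the device and claim it so that another
 * process cannot attach to the same device; returning false skips it.
 */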
static bool
probe_cb(void *cb_ctx, struct spdk_pci_device *pci_dev)
{
	struct spdk_pci_addr pci_addr = spdk_pci_device_get_addr(pci_dev);

	SPDK_NOTICELOG("Found matching device at %04x:%02x:%02x.%x vendor:0x%04x device:0x%04x\n",
		       pci_addr.domain,
		       pci_addr.bus,
		       pci_addr.dev,
		       pci_addr.func,
		       spdk_pci_device_get_vendor_id(pci_dev),
		       spdk_pci_device_get_device_id(pci_dev));

	/* Claim the device in case of a conflict with another process. */
	if (spdk_pci_device_claim(pci_dev) < 0) {
		return false;
	}

	return true;
}

static int
user_idxd_probe(void *cb_ctx, spdk_idxd_attach_cb attach_cb)
{
	int rc;
	struct idxd_enum_ctx enum_ctx;

	enum_ctx.probe_cb = probe_cb;
	enum_ctx.attach_cb = attach_cb;
	enum_ctx.cb_ctx = cb_ctx;

	pthread_mutex_lock(&g_driver_lock);
	rc = spdk_pci_enumerate(spdk_pci_idxd_get_driver(), idxd_enum_cb, &enum_ctx);
	pthread_mutex_unlock(&g_driver_lock);

	return rc;
}

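/*
 * A rough usage sketch (illustrative, not part of this file): a consumer
 * supplies an attach callback and starts enumeration through the common
 * library's probe entry point, which dispatches here when the "user"
 * impl is selected. The example_attach_cb name is hypothetical.
 *
 *	static void
 *	example_attach_cb(void *cb_ctx, struct spdk_idxd_device *idxd)
 *	{
 *		// Stash the device handle for later channel construction.
 *	}
 *
 *	spdk_idxd_probe(NULL, example_attach_cb);
 */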
static void
user_idxd_dump_sw_err(struct spdk_idxd_device *idxd, void *portal)
{
	uint64_t sw_error_0;
	uint16_t i;

	sw_error_0 = _idxd_read_8(idxd, IDXD_SWERR_OFFSET);

	SPDK_NOTICELOG("SW Error bits set:\n");
	for (i = 0; i < CHAR_BIT; i++) {
		if ((1ULL << i) & sw_error_0) {
			SPDK_NOTICELOG("    %d\n", i);
		}
	}
	SPDK_NOTICELOG("SW Error error code: %#x\n", (uint8_t)(sw_error_0 >> 8));
	SPDK_NOTICELOG("SW Error WQ index: %u\n", (uint8_t)(sw_error_0 >> 16));
	SPDK_NOTICELOG("SW Error Operation: %u\n", (uint8_t)(sw_error_0 >> 32));
}

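/*
 * Return the submission portal address for the channel's work queue.
 * Portals live in the WQ BAR at PORTAL_SIZE intervals, one per WQ.
 */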
static char *
user_idxd_portal_get_addr(struct spdk_idxd_device *idxd)
{
	return (char *)idxd->portals + idxd->wq_id * PORTAL_SIZE;
}

static bool
user_idxd_nop_check(struct spdk_idxd_device *idxd)
{
	struct spdk_user_idxd_device *user_idxd = __user_idxd(idxd);

	/* TODO: temp workaround for simulator. Remove this function when fixed or w/silicon. */
	if (user_idxd->registers.gencap.raw == 0x1833f011f) {
		return true;
	}

	return false;
}

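/*
 * Function table registered with the common idxd library; the library
 * calls through these ops when the "user" (PCI) implementation is in use.
 */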
static struct spdk_idxd_impl g_user_idxd_impl = {
	.name			= "user",
	.set_config		= user_idxd_set_config,
	.probe			= user_idxd_probe,
	.destruct		= user_idxd_device_destruct,
	.dump_sw_error		= user_idxd_dump_sw_err,
	.portal_get_addr	= user_idxd_portal_get_addr,
	.nop_check		= user_idxd_nop_check,
};

/* Caller must hold g_driver_lock */
static struct spdk_idxd_device *
idxd_attach(struct spdk_pci_device *device)
{
	struct spdk_user_idxd_device *user_idxd;
	struct spdk_idxd_device *idxd;
	uint32_t cmd_reg;
	int rc;

	user_idxd = calloc(1, sizeof(struct spdk_user_idxd_device));
	if (user_idxd == NULL) {
		SPDK_ERRLOG("Failed to allocate memory for user_idxd device.\n");
		return NULL;
	}

	idxd = &user_idxd->idxd;
	user_idxd->device = device;
	idxd->impl = &g_user_idxd_impl;
	pthread_mutex_init(&idxd->num_channels_lock, NULL);

	/* Enable PCI bus mastering: set bit 2 of the PCI command register,
	 * which lives at config space offset 4.
	 */
	spdk_pci_device_cfg_read32(device, &cmd_reg, 4);
	cmd_reg |= 0x4;
	spdk_pci_device_cfg_write32(device, cmd_reg, 4);

	rc = idxd_device_configure(user_idxd);
	if (rc) {
		goto err;
	}

	return idxd;
err:
	user_idxd_device_destruct(idxd);
	return NULL;
}

SPDK_IDXD_IMPL_REGISTER(user, &g_user_idxd_impl);