/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2021 Intel Corporation.
 *   All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/env.h"
#include "spdk/util.h"
#include "spdk/memory.h"
#include "spdk/likely.h"

#include "spdk/log.h"
#include "spdk_internal/idxd.h"

#include "idxd_internal.h"

struct spdk_user_idxd_device {
	struct spdk_idxd_device	idxd;
	struct spdk_pci_device	*device;
	int			sock_id;
	struct idxd_registers	*registers;
};

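/* Downcast from the embedded common idxd device to the containing user-space device. */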
#define __user_idxd(idxd) ((struct spdk_user_idxd_device *)(idxd))

pthread_mutex_t	g_driver_lock = PTHREAD_MUTEX_INITIALIZER;

static struct spdk_idxd_device *idxd_attach(struct spdk_pci_device *device);

/* Used for control commands, not for descriptor submission. */
static int
idxd_wait_cmd(struct spdk_user_idxd_device *user_idxd, int _timeout)
{
	uint32_t timeout = _timeout;
	union idxd_cmdsts_register cmd_status = {};

	cmd_status.raw = spdk_mmio_read_4(&user_idxd->registers->cmdsts.raw);
	while (cmd_status.active && --timeout) {
		usleep(1);
		cmd_status.raw = spdk_mmio_read_4(&user_idxd->registers->cmdsts.raw);
	}

	/* Check for timeout */
	if (timeout == 0 && cmd_status.active) {
		SPDK_ERRLOG("Command timeout, waited %d us\n", _timeout);
		return -EBUSY;
	}

	/* Check for error */
	if (cmd_status.err) {
		SPDK_ERRLOG("Command status reg reports error 0x%x\n", cmd_status.err);
		return -EINVAL;
	}

	return 0;
}

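/*
 * Release the mapping for the requested BAR, if one exists. Note that a BAR
 * index of 0 is passed to spdk_pci_device_unmap_bar() for both BARs; the
 * address argument identifies the mapping being released.
 */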
static int
idxd_unmap_pci_bar(struct spdk_user_idxd_device *user_idxd, int bar)
{
	int rc = 0;
	void *addr = NULL;

	if (bar == IDXD_MMIO_BAR) {
		addr = (void *)user_idxd->registers;
	} else if (bar == IDXD_WQ_BAR) {
		addr = (void *)user_idxd->idxd.portal;
	}

	if (addr) {
		rc = spdk_pci_device_unmap_bar(user_idxd->device, 0, addr);
	}
	return rc;
}

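/*
 * Map the device's MMIO register BAR and work queue portal BAR. If the portal
 * mapping fails, the register mapping is released before returning an error.
 */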
static int
idxd_map_pci_bars(struct spdk_user_idxd_device *user_idxd)
{
	int rc;
	void *addr;
	uint64_t phys_addr, size;

	rc = spdk_pci_device_map_bar(user_idxd->device, IDXD_MMIO_BAR, &addr, &phys_addr, &size);
	if (rc != 0 || addr == NULL) {
		SPDK_ERRLOG("spdk_pci_device_map_bar failed with error code %d\n", rc);
		return -EINVAL;
	}
	user_idxd->registers = (struct idxd_registers *)addr;

	rc = spdk_pci_device_map_bar(user_idxd->device, IDXD_WQ_BAR, &addr, &phys_addr, &size);
	if (rc != 0 || addr == NULL) {
		SPDK_ERRLOG("spdk_pci_device_map_bar failed with error code %d\n", rc);
		rc = idxd_unmap_pci_bar(user_idxd, IDXD_MMIO_BAR);
		if (rc) {
			SPDK_ERRLOG("unable to unmap MMIO bar\n");
		}
		return -EINVAL;
	}
	user_idxd->idxd.portal = addr;

	return 0;
}

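/* Issue the DISABLE_DEV command and wait for it to complete. */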
static void
idxd_disable_dev(struct spdk_user_idxd_device *user_idxd)
{
	int rc;
	union idxd_cmd_register cmd = {};

	cmd.command_code = IDXD_DISABLE_DEV;

	assert(&user_idxd->registers->cmd.raw); /* scan-build */
	spdk_mmio_write_4(&user_idxd->registers->cmd.raw, cmd.raw);
	rc = idxd_wait_cmd(user_idxd, IDXD_REGISTER_TIMEOUT_US);
	if (rc < 0) {
		SPDK_ERRLOG("Error disabling device %d\n", rc);
	}
}

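/* Issue the RESET_DEVICE command and wait for it to complete. */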
static int
idxd_reset_dev(struct spdk_user_idxd_device *user_idxd)
{
	int rc;
	union idxd_cmd_register cmd = {};

	cmd.command_code = IDXD_RESET_DEVICE;

	spdk_mmio_write_4(&user_idxd->registers->cmd.raw, cmd.raw);
	rc = idxd_wait_cmd(user_idxd, IDXD_REGISTER_TIMEOUT_US);
	if (rc < 0) {
		SPDK_ERRLOG("Error resetting device %d\n", rc);
	}

	return rc;
}

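/*
 * Program the group configuration table: one group containing every engine
 * and a single work queue, with the remaining groups zeroed out.
 */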
static int
idxd_group_config(struct spdk_user_idxd_device *user_idxd)
{
	int i;
	union idxd_groupcap_register groupcap;
	union idxd_enginecap_register enginecap;
	union idxd_wqcap_register wqcap;
	union idxd_offsets_register table_offsets;

	struct idxd_grptbl *grptbl;
	struct idxd_grpcfg grpcfg = {};

	groupcap.raw = spdk_mmio_read_8(&user_idxd->registers->groupcap.raw);
	enginecap.raw = spdk_mmio_read_8(&user_idxd->registers->enginecap.raw);
	wqcap.raw = spdk_mmio_read_8(&user_idxd->registers->wqcap.raw);

	if (wqcap.num_wqs < 1) {
		return -ENOTSUP;
	}

	/* Build one group with all of the engines and a single work queue. */
	grpcfg.wqs[0] = 1;
	grpcfg.flags.read_buffers_allowed = groupcap.read_bufs;
	grpcfg.flags.tc_a = 1;
	grpcfg.flags.tc_b = 1;
	for (i = 0; i < enginecap.num_engines; i++) {
		grpcfg.engines |= (1 << i);
	}

	table_offsets.raw[0] = spdk_mmio_read_8(&user_idxd->registers->offsets.raw[0]);
	table_offsets.raw[1] = spdk_mmio_read_8(&user_idxd->registers->offsets.raw[1]);

	grptbl = (struct idxd_grptbl *)((uint8_t *)user_idxd->registers + (table_offsets.grpcfg *
					IDXD_TABLE_OFFSET_MULT));

	/* Write the group we've configured */
	spdk_mmio_write_8(&grptbl->group[0].wqs[0], grpcfg.wqs[0]);
	spdk_mmio_write_8(&grptbl->group[0].wqs[1], 0);
	spdk_mmio_write_8(&grptbl->group[0].wqs[2], 0);
	spdk_mmio_write_8(&grptbl->group[0].wqs[3], 0);
	spdk_mmio_write_8(&grptbl->group[0].engines, grpcfg.engines);
	spdk_mmio_write_4(&grptbl->group[0].flags.raw, grpcfg.flags.raw);

	/* Write zeroes to the rest of the groups */
	for (i = 1; i < groupcap.num_groups; i++) {
		spdk_mmio_write_8(&grptbl->group[i].wqs[0], 0L);
		spdk_mmio_write_8(&grptbl->group[i].wqs[1], 0L);
		spdk_mmio_write_8(&grptbl->group[i].wqs[2], 0L);
		spdk_mmio_write_8(&grptbl->group[i].wqs[3], 0L);
		spdk_mmio_write_8(&grptbl->group[i].engines, 0L);
		spdk_mmio_write_4(&grptbl->group[i].flags.raw, 0L);
	}

	return 0;
}

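/*
 * Program work queue 0 as a dedicated WQ that owns all of the ring slots the
 * device reports, and size the per-device channel budget from that total.
 */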
static int
idxd_wq_config(struct spdk_user_idxd_device *user_idxd)
{
	uint32_t i;
	struct spdk_idxd_device *idxd = &user_idxd->idxd;
	union idxd_wqcap_register wqcap;
	union idxd_offsets_register table_offsets;
	union idxd_wqcfg *wqcfg;

	wqcap.raw = spdk_mmio_read_8(&user_idxd->registers->wqcap.raw);

	SPDK_DEBUGLOG(idxd, "Total ring slots available 0x%x\n", wqcap.total_wq_size);

	idxd->total_wq_size = wqcap.total_wq_size;
	/* Spread the channels we allow per device based on the total number of
	 * work queue entries, to try and achieve optimal performance for common cases.
	 */
	idxd->chan_per_device = (idxd->total_wq_size >= 128) ? 8 : 4;

	table_offsets.raw[0] = spdk_mmio_read_8(&user_idxd->registers->offsets.raw[0]);
	table_offsets.raw[1] = spdk_mmio_read_8(&user_idxd->registers->offsets.raw[1]);

	wqcfg = (union idxd_wqcfg *)((uint8_t *)user_idxd->registers + (table_offsets.wqcfg *
				     IDXD_TABLE_OFFSET_MULT));

	for (i = 0; i < SPDK_COUNTOF(wqcfg->raw); i++) {
		wqcfg->raw[i] = spdk_mmio_read_4(&wqcfg->raw[i]);
	}

	wqcfg->wq_size = wqcap.total_wq_size;
	wqcfg->mode = WQ_MODE_DEDICATED;
	wqcfg->max_batch_shift = LOG2_WQ_MAX_BATCH;
	wqcfg->max_xfer_shift = LOG2_WQ_MAX_XFER;
	wqcfg->wq_state = WQ_ENABLED;
	wqcfg->priority = WQ_PRIORITY_1;

	for (i = 0; i < SPDK_COUNTOF(wqcfg->raw); i++) {
		spdk_mmio_write_4(&wqcfg->raw[i], wqcfg->raw[i]);
	}

	return 0;
}

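/*
 * Take the device from reset to operational: map its BARs, reset it, program
 * the group and WQ tables, then enable the device and work queue 0.
 */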
static int
idxd_device_configure(struct spdk_user_idxd_device *user_idxd)
{
	int rc = 0;
	union idxd_gensts_register gensts_reg;
	union idxd_cmd_register cmd = {};

	/*
	 * Map BAR0 and BAR2
	 */
	rc = idxd_map_pci_bars(user_idxd);
	if (rc) {
		return rc;
	}

	/*
	 * Reset the device
	 */
	rc = idxd_reset_dev(user_idxd);
	if (rc) {
		goto err_reset;
	}

	/*
	 * Save the device version for use in the common library code.
	 */
	user_idxd->idxd.version = user_idxd->registers->version;

	/*
	 * Configure groups and work queues.
	 */
	rc = idxd_group_config(user_idxd);
	if (rc) {
		goto err_group_cfg;
	}

	rc = idxd_wq_config(user_idxd);
	if (rc) {
		goto err_wq_cfg;
	}

	/*
	 * Enable the device
	 */
	gensts_reg.raw = spdk_mmio_read_4(&user_idxd->registers->gensts.raw);
	assert(gensts_reg.state == IDXD_DEVICE_STATE_DISABLED);

	cmd.command_code = IDXD_ENABLE_DEV;

	spdk_mmio_write_4(&user_idxd->registers->cmd.raw, cmd.raw);
	rc = idxd_wait_cmd(user_idxd, IDXD_REGISTER_TIMEOUT_US);
	gensts_reg.raw = spdk_mmio_read_4(&user_idxd->registers->gensts.raw);
	if ((rc < 0) || (gensts_reg.state != IDXD_DEVICE_STATE_ENABLED)) {
		rc = -EINVAL;
		SPDK_ERRLOG("Error enabling device %d\n", rc);
		goto err_device_enable;
	}

	/*
	 * Enable the work queue that we've configured
	 */
	cmd.command_code = IDXD_ENABLE_WQ;
	cmd.operand = 0;

	spdk_mmio_write_4(&user_idxd->registers->cmd.raw, cmd.raw);
	rc = idxd_wait_cmd(user_idxd, IDXD_REGISTER_TIMEOUT_US);
	if (rc < 0) {
		SPDK_ERRLOG("Error enabling work queues %d\n", rc);
		goto err_wq_enable;
	}

	if ((rc == 0) && (gensts_reg.state == IDXD_DEVICE_STATE_ENABLED)) {
		SPDK_DEBUGLOG(idxd, "Device enabled VID 0x%x DID 0x%x\n",
			      user_idxd->device->id.vendor_id, user_idxd->device->id.device_id);
	}

	return rc;
err_wq_enable:
err_device_enable:
err_wq_cfg:
err_group_cfg:
err_reset:
	idxd_unmap_pci_bar(user_idxd, IDXD_MMIO_BAR);
	idxd_unmap_pci_bar(user_idxd, IDXD_WQ_BAR);

	return rc;
}

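/*
 * Disable the device, unmap its BARs, detach the PCI device and free the
 * per-device memory.
 */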
static void
user_idxd_device_destruct(struct spdk_idxd_device *idxd)
{
	struct spdk_user_idxd_device *user_idxd = __user_idxd(idxd);

	idxd_disable_dev(user_idxd);

	idxd_unmap_pci_bar(user_idxd, IDXD_MMIO_BAR);
	idxd_unmap_pci_bar(user_idxd, IDXD_WQ_BAR);

	spdk_pci_device_detach(user_idxd->device);
	if (idxd->type == IDXD_DEV_TYPE_IAA) {
		spdk_free(idxd->aecs);
	}
	free(user_idxd);
}

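/* Carries the caller's callbacks through spdk_pci_enumerate() to idxd_enum_cb(). */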
struct idxd_enum_ctx {
	spdk_idxd_probe_cb probe_cb;
	spdk_idxd_attach_cb attach_cb;
	void *cb_ctx;
};

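/*
 * Internal probe helper: log the matching device and claim it so that it is
 * not picked up by another process.
 */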
static bool
probe_cb(void *cb_ctx, struct spdk_pci_device *pci_dev)
{
	struct spdk_pci_addr pci_addr __attribute__((unused));

	pci_addr = spdk_pci_device_get_addr(pci_dev);

	SPDK_DEBUGLOG(idxd,
		      " Found matching device at %04x:%02x:%02x.%x vendor:0x%04x device:0x%04x\n",
		      pci_addr.domain,
		      pci_addr.bus,
		      pci_addr.dev,
		      pci_addr.func,
		      spdk_pci_device_get_vendor_id(pci_dev),
		      spdk_pci_device_get_device_id(pci_dev));

	/* Claim the device to guard against concurrent use by another process. */
	if (spdk_pci_device_claim(pci_dev) < 0) {
		return false;
	}

	return true;
}

/* This function must only be called while holding g_driver_lock */
static int
idxd_enum_cb(void *ctx, struct spdk_pci_device *pci_dev)
{
	struct idxd_enum_ctx *enum_ctx = ctx;
	struct spdk_idxd_device *idxd;

	/* Call the user probe_cb to see if they want this device or not; if not,
	 * skip it with a positive return code.
	 */
	if (enum_ctx->probe_cb(enum_ctx->cb_ctx, pci_dev) == false) {
		return 1;
	}

	if (probe_cb(enum_ctx->cb_ctx, pci_dev)) {
		idxd = idxd_attach(pci_dev);
		if (idxd == NULL) {
			SPDK_ERRLOG("idxd_attach() failed\n");
			return -EINVAL;
		}

		enum_ctx->attach_cb(enum_ctx->cb_ctx, idxd);
	}

	return 0;
}

/* The IDXD driver supports two distinct HW units, DSA and IAA. */
static int
user_idxd_probe(void *cb_ctx, spdk_idxd_attach_cb attach_cb,
		spdk_idxd_probe_cb probe_cb)
{
	int rc;
	struct idxd_enum_ctx enum_ctx;

	enum_ctx.probe_cb = probe_cb;
	enum_ctx.attach_cb = attach_cb;
	enum_ctx.cb_ctx = cb_ctx;

	pthread_mutex_lock(&g_driver_lock);
	rc = spdk_pci_enumerate(spdk_pci_idxd_get_driver(), idxd_enum_cb, &enum_ctx);
	pthread_mutex_unlock(&g_driver_lock);
	assert(rc == 0);

	return rc;
}

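/*
 * Read and log the raw SWERR registers plus the decoded error code, WQ index
 * and operation.
 */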
static void
user_idxd_dump_sw_err(struct spdk_idxd_device *idxd, void *portal)
{
	struct spdk_user_idxd_device *user_idxd = __user_idxd(idxd);
	union idxd_swerr_register sw_err;
	uint16_t i;

	SPDK_NOTICELOG("SW Error Raw:\n");
	for (i = 0; i < 4; i++) {
		sw_err.raw[i] = spdk_mmio_read_8(&user_idxd->registers->sw_err.raw[i]);
		SPDK_NOTICELOG("    0x%lx\n", sw_err.raw[i]);
	}

	SPDK_NOTICELOG("SW Error error code: %#x\n", (uint8_t)(sw_err.error));
	SPDK_NOTICELOG("SW Error WQ index: %u\n", (uint8_t)(sw_err.wq_idx));
	SPDK_NOTICELOG("SW Error Operation: %u\n", (uint8_t)(sw_err.operation));
}

static char *
user_idxd_portal_get_addr(struct spdk_idxd_device *idxd)
{
	return (char *)idxd->portal;
}

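/*
 * Ops table registered with the common idxd library via
 * SPDK_IDXD_IMPL_REGISTER() at the bottom of this file.
 */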
static struct spdk_idxd_impl g_user_idxd_impl = {
	.name			= "user",
	.probe			= user_idxd_probe,
	.destruct		= user_idxd_device_destruct,
	.dump_sw_error		= user_idxd_dump_sw_err,
	.portal_get_addr	= user_idxd_portal_get_addr
};

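/*
 * For context: callers reach this backend through the public spdk_idxd_probe()
 * API rather than through g_user_idxd_impl directly. A minimal sketch of that
 * wiring (the callback names here are hypothetical, not part of this file):
 *
 *	static bool my_probe_cb(void *cb_ctx, struct spdk_pci_device *dev)
 *	{
 *		return true;	// accept every idxd device offered
 *	}
 *
 *	static void my_attach_cb(void *cb_ctx, struct spdk_idxd_device *idxd)
 *	{
 *		// stash idxd; get a channel later with spdk_idxd_get_channel(idxd)
 *	}
 *
 *	rc = spdk_idxd_probe(NULL, my_attach_cb, my_probe_cb);
 */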
/*
 * Fixed Huffman tables the IAA hardware requires to implement RFC-1951.
 */
const uint32_t fixed_ll_sym[286] = {
	0x40030, 0x40031, 0x40032, 0x40033, 0x40034, 0x40035, 0x40036, 0x40037,
	0x40038, 0x40039, 0x4003A, 0x4003B, 0x4003C, 0x4003D, 0x4003E, 0x4003F,
	0x40040, 0x40041, 0x40042, 0x40043, 0x40044, 0x40045, 0x40046, 0x40047,
	0x40048, 0x40049, 0x4004A, 0x4004B, 0x4004C, 0x4004D, 0x4004E, 0x4004F,
	0x40050, 0x40051, 0x40052, 0x40053, 0x40054, 0x40055, 0x40056, 0x40057,
	0x40058, 0x40059, 0x4005A, 0x4005B, 0x4005C, 0x4005D, 0x4005E, 0x4005F,
	0x40060, 0x40061, 0x40062, 0x40063, 0x40064, 0x40065, 0x40066, 0x40067,
	0x40068, 0x40069, 0x4006A, 0x4006B, 0x4006C, 0x4006D, 0x4006E, 0x4006F,
	0x40070, 0x40071, 0x40072, 0x40073, 0x40074, 0x40075, 0x40076, 0x40077,
	0x40078, 0x40079, 0x4007A, 0x4007B, 0x4007C, 0x4007D, 0x4007E, 0x4007F,
	0x40080, 0x40081, 0x40082, 0x40083, 0x40084, 0x40085, 0x40086, 0x40087,
	0x40088, 0x40089, 0x4008A, 0x4008B, 0x4008C, 0x4008D, 0x4008E, 0x4008F,
	0x40090, 0x40091, 0x40092, 0x40093, 0x40094, 0x40095, 0x40096, 0x40097,
	0x40098, 0x40099, 0x4009A, 0x4009B, 0x4009C, 0x4009D, 0x4009E, 0x4009F,
	0x400A0, 0x400A1, 0x400A2, 0x400A3, 0x400A4, 0x400A5, 0x400A6, 0x400A7,
	0x400A8, 0x400A9, 0x400AA, 0x400AB, 0x400AC, 0x400AD, 0x400AE, 0x400AF,
	0x400B0, 0x400B1, 0x400B2, 0x400B3, 0x400B4, 0x400B5, 0x400B6, 0x400B7,
	0x400B8, 0x400B9, 0x400BA, 0x400BB, 0x400BC, 0x400BD, 0x400BE, 0x400BF,
	0x48190, 0x48191, 0x48192, 0x48193, 0x48194, 0x48195, 0x48196, 0x48197,
	0x48198, 0x48199, 0x4819A, 0x4819B, 0x4819C, 0x4819D, 0x4819E, 0x4819F,
	0x481A0, 0x481A1, 0x481A2, 0x481A3, 0x481A4, 0x481A5, 0x481A6, 0x481A7,
	0x481A8, 0x481A9, 0x481AA, 0x481AB, 0x481AC, 0x481AD, 0x481AE, 0x481AF,
	0x481B0, 0x481B1, 0x481B2, 0x481B3, 0x481B4, 0x481B5, 0x481B6, 0x481B7,
	0x481B8, 0x481B9, 0x481BA, 0x481BB, 0x481BC, 0x481BD, 0x481BE, 0x481BF,
	0x481C0, 0x481C1, 0x481C2, 0x481C3, 0x481C4, 0x481C5, 0x481C6, 0x481C7,
	0x481C8, 0x481C9, 0x481CA, 0x481CB, 0x481CC, 0x481CD, 0x481CE, 0x481CF,
	0x481D0, 0x481D1, 0x481D2, 0x481D3, 0x481D4, 0x481D5, 0x481D6, 0x481D7,
	0x481D8, 0x481D9, 0x481DA, 0x481DB, 0x481DC, 0x481DD, 0x481DE, 0x481DF,
	0x481E0, 0x481E1, 0x481E2, 0x481E3, 0x481E4, 0x481E5, 0x481E6, 0x481E7,
	0x481E8, 0x481E9, 0x481EA, 0x481EB, 0x481EC, 0x481ED, 0x481EE, 0x481EF,
	0x481F0, 0x481F1, 0x481F2, 0x481F3, 0x481F4, 0x481F5, 0x481F6, 0x481F7,
	0x481F8, 0x481F9, 0x481FA, 0x481FB, 0x481FC, 0x481FD, 0x481FE, 0x481FF,
	0x38000, 0x38001, 0x38002, 0x38003, 0x38004, 0x38005, 0x38006, 0x38007,
	0x38008, 0x38009, 0x3800A, 0x3800B, 0x3800C, 0x3800D, 0x3800E, 0x3800F,
	0x38010, 0x38011, 0x38012, 0x38013, 0x38014, 0x38015, 0x38016, 0x38017,
	0x400C0, 0x400C1, 0x400C2, 0x400C3, 0x400C4, 0x400C5
};

const uint32_t fixed_d_sym[30] = {
	0x28000, 0x28001, 0x28002, 0x28003, 0x28004, 0x28005, 0x28006, 0x28007,
	0x28008, 0x28009, 0x2800A, 0x2800B, 0x2800C, 0x2800D, 0x2800E, 0x2800F,
	0x28010, 0x28011, 0x28012, 0x28013, 0x28014, 0x28015, 0x28016, 0x28017,
	0x28018, 0x28019, 0x2801A, 0x2801B, 0x2801C, 0x2801D
};
#define DYNAMIC_HDR			0x2
#define DYNAMIC_HDR_SIZE		3

/* Caller must hold g_driver_lock */
static struct spdk_idxd_device *
idxd_attach(struct spdk_pci_device *device)
{
	struct spdk_user_idxd_device *user_idxd;
	struct spdk_idxd_device *idxd;
	uint16_t did = device->id.device_id;
	uint32_t cmd_reg;
	uint64_t updated = sizeof(struct iaa_aecs);
	int rc;

	user_idxd = calloc(1, sizeof(struct spdk_user_idxd_device));
	if (user_idxd == NULL) {
		SPDK_ERRLOG("Failed to allocate memory for user_idxd device.\n");
		return NULL;
	}

	idxd = &user_idxd->idxd;
	if (did == PCI_DEVICE_ID_INTEL_DSA) {
		idxd->type = IDXD_DEV_TYPE_DSA;
	} else if (did == PCI_DEVICE_ID_INTEL_IAA) {
		idxd->type = IDXD_DEV_TYPE_IAA;
		idxd->aecs = spdk_zmalloc(sizeof(struct iaa_aecs),
					  0x20, NULL,
					  SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
		if (idxd->aecs == NULL) {
			SPDK_ERRLOG("Failed to allocate iaa aecs\n");
			goto err;
		}

		idxd->aecs_addr = spdk_vtophys((void *)idxd->aecs, &updated);
		if (idxd->aecs_addr == SPDK_VTOPHYS_ERROR || updated < sizeof(struct iaa_aecs)) {
			SPDK_ERRLOG("Failed to translate iaa aecs\n");
			spdk_free(idxd->aecs);
			goto err;
		}

		/* Configure aecs table using fixed Huffman table */
		idxd->aecs->output_accum[0] = DYNAMIC_HDR | 1;
		idxd->aecs->num_output_accum_bits = DYNAMIC_HDR_SIZE;

		/* Add Huffman table to aecs */
		memcpy(idxd->aecs->ll_sym, fixed_ll_sym, sizeof(fixed_ll_sym));
		memcpy(idxd->aecs->d_sym, fixed_d_sym, sizeof(fixed_d_sym));
	}

	user_idxd->device = device;
	idxd->impl = &g_user_idxd_impl;
	idxd->socket_id = device->socket_id;
	pthread_mutex_init(&idxd->num_channels_lock, NULL);

	/* Enable PCI bus mastering: set the Bus Master Enable bit (bit 2) in the
	 * PCI command register at config space offset 4.
	 */
	spdk_pci_device_cfg_read32(device, &cmd_reg, 4);
	cmd_reg |= 0x4;
	spdk_pci_device_cfg_write32(device, cmd_reg, 4);

	rc = idxd_device_configure(user_idxd);
	if (rc) {
		goto err;
	}

	return idxd;
err:
	user_idxd_device_destruct(idxd);
	return NULL;
}

SPDK_IDXD_IMPL_REGISTER(user, &g_user_idxd_impl);