/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2021 Intel Corporation.
 * All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/env.h"
#include "spdk/util.h"
#include "spdk/memory.h"
#include "spdk/likely.h"

#include "spdk/log.h"
#include "spdk_internal/idxd.h"

#include "idxd_internal.h"

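/* User-space personality of an IDXD (DSA/IAA) device: wraps the generic
 * spdk_idxd_device with the owning PCI device and the mapped MMIO
 * register space.
 */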
struct spdk_user_idxd_device {
	struct spdk_idxd_device idxd;
	struct spdk_pci_device *device;
	int sock_id;
	struct idxd_registers *registers;
};

#define __user_idxd(idxd) ((struct spdk_user_idxd_device *)(idxd))

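/* Serializes PCI enumeration and device attach; see user_idxd_probe()
 * and the idxd_enum_cb()/idxd_attach() callers below.
 */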
pthread_mutex_t g_driver_lock = PTHREAD_MUTEX_INITIALIZER;

static struct spdk_idxd_device *idxd_attach(struct spdk_pci_device *device);

/* Used for control commands, not for descriptor submission. */
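/* The loop below re-reads CMDSTS after a 1 us sleep on each iteration, so
 * the _timeout argument is roughly a budget in microseconds.
 */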
static int
idxd_wait_cmd(struct spdk_user_idxd_device *user_idxd, int _timeout)
{
	uint32_t timeout = _timeout;
	union idxd_cmdsts_register cmd_status = {};

	cmd_status.raw = spdk_mmio_read_4(&user_idxd->registers->cmdsts.raw);
	while (cmd_status.active && --timeout) {
		usleep(1);
		cmd_status.raw = spdk_mmio_read_4(&user_idxd->registers->cmdsts.raw);
	}

	/* Check for timeout */
	if (timeout == 0 && cmd_status.active) {
		SPDK_ERRLOG("Command timeout, waited %d\n", _timeout);
		return -EBUSY;
	}

	/* Check for error */
	if (cmd_status.err) {
		SPDK_ERRLOG("Command status reg reports error 0x%x\n", cmd_status.err);
		return -EINVAL;
	}

	return 0;
}

static int
idxd_unmap_pci_bar(struct spdk_user_idxd_device *user_idxd, int bar)
{
	int rc = 0;
	void *addr = NULL;

	if (bar == IDXD_MMIO_BAR) {
		addr = (void *)user_idxd->registers;
	} else if (bar == IDXD_WQ_BAR) {
		addr = (void *)user_idxd->idxd.portal;
	}

	if (addr) {
		rc = spdk_pci_device_unmap_bar(user_idxd->device, bar, addr);
	}
	return rc;
}

static int
idxd_map_pci_bars(struct spdk_user_idxd_device *user_idxd)
{
	int rc;
	void *addr;
	uint64_t phys_addr, size;

	rc = spdk_pci_device_map_bar(user_idxd->device, IDXD_MMIO_BAR, &addr, &phys_addr, &size);
	if (rc != 0 || addr == NULL) {
		SPDK_ERRLOG("spdk_pci_device_map_bar(MMIO) failed with error code %d\n", rc);
		return -1;
	}
	user_idxd->registers = (struct idxd_registers *)addr;

	rc = spdk_pci_device_map_bar(user_idxd->device, IDXD_WQ_BAR, &addr, &phys_addr, &size);
	if (rc != 0 || addr == NULL) {
		SPDK_ERRLOG("spdk_pci_device_map_bar(WQ) failed with error code %d\n", rc);
		rc = idxd_unmap_pci_bar(user_idxd, IDXD_MMIO_BAR);
		if (rc) {
			SPDK_ERRLOG("unable to unmap MMIO bar\n");
		}
		return -EINVAL;
	}
	user_idxd->idxd.portal = addr;

	return 0;
}

static void
idxd_disable_dev(struct spdk_user_idxd_device *user_idxd)
{
	int rc;
	union idxd_cmd_register cmd = {};

	cmd.command_code = IDXD_DISABLE_DEV;

	assert(&user_idxd->registers->cmd.raw); /* scan-build */
	spdk_mmio_write_4(&user_idxd->registers->cmd.raw, cmd.raw);
	rc = idxd_wait_cmd(user_idxd, IDXD_REGISTER_TIMEOUT_US);
	if (rc < 0) {
		SPDK_ERRLOG("Error disabling device %d\n", rc);
	}
}

static int
idxd_reset_dev(struct spdk_user_idxd_device *user_idxd)
{
	int rc;
	union idxd_cmd_register cmd = {};

	cmd.command_code = IDXD_RESET_DEVICE;

	spdk_mmio_write_4(&user_idxd->registers->cmd.raw, cmd.raw);
	rc = idxd_wait_cmd(user_idxd, IDXD_REGISTER_TIMEOUT_US);
	if (rc < 0) {
		SPDK_ERRLOG("Error resetting device %d\n", rc);
	}

	return rc;
}

static int
idxd_group_config(struct spdk_user_idxd_device *user_idxd)
{
	int i;
	union idxd_groupcap_register groupcap;
	union idxd_enginecap_register enginecap;
	union idxd_wqcap_register wqcap;
	union idxd_offsets_register table_offsets;

	struct idxd_grptbl *grptbl;
	struct idxd_grpcfg grpcfg = {};

	groupcap.raw = spdk_mmio_read_8(&user_idxd->registers->groupcap.raw);
	enginecap.raw = spdk_mmio_read_8(&user_idxd->registers->enginecap.raw);
	wqcap.raw = spdk_mmio_read_8(&user_idxd->registers->wqcap.raw);

	if (wqcap.num_wqs < 1) {
		return -ENOTSUP;
	}

	/* Build one group with all of the engines and a single work queue. */
	grpcfg.wqs[0] = 1;
	grpcfg.flags.read_buffers_allowed = groupcap.read_bufs;
	grpcfg.flags.tc_a = 1;
	grpcfg.flags.tc_b = 1;
	for (i = 0; i < enginecap.num_engines; i++) {
		grpcfg.engines |= (1 << i);
	}
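	/* With four engines, for example, this leaves grpcfg.engines == 0xf,
	 * i.e. every engine assigned to group 0.
	 */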

	table_offsets.raw[0] = spdk_mmio_read_8(&user_idxd->registers->offsets.raw[0]);
	table_offsets.raw[1] = spdk_mmio_read_8(&user_idxd->registers->offsets.raw[1]);

	grptbl = (struct idxd_grptbl *)((uint8_t *)user_idxd->registers + (table_offsets.grpcfg *
					IDXD_TABLE_OFFSET_MULT));

	/* Write the group we've configured */
	spdk_mmio_write_8(&grptbl->group[0].wqs[0], grpcfg.wqs[0]);
	spdk_mmio_write_8(&grptbl->group[0].wqs[1], 0);
	spdk_mmio_write_8(&grptbl->group[0].wqs[2], 0);
	spdk_mmio_write_8(&grptbl->group[0].wqs[3], 0);
	spdk_mmio_write_8(&grptbl->group[0].engines, grpcfg.engines);
	spdk_mmio_write_4(&grptbl->group[0].flags.raw, grpcfg.flags.raw);

	/* Write zeroes to the rest of the groups */
	for (i = 1; i < groupcap.num_groups; i++) {
		spdk_mmio_write_8(&grptbl->group[i].wqs[0], 0L);
		spdk_mmio_write_8(&grptbl->group[i].wqs[1], 0L);
		spdk_mmio_write_8(&grptbl->group[i].wqs[2], 0L);
		spdk_mmio_write_8(&grptbl->group[i].wqs[3], 0L);
		spdk_mmio_write_8(&grptbl->group[i].engines, 0L);
		spdk_mmio_write_4(&grptbl->group[i].flags.raw, 0L);
	}

	return 0;
}

static int
idxd_wq_config(struct spdk_user_idxd_device *user_idxd)
{
	uint32_t i;
	struct spdk_idxd_device *idxd = &user_idxd->idxd;
	union idxd_wqcap_register wqcap;
	union idxd_offsets_register table_offsets;
	union idxd_wqcfg *wqcfg;

	wqcap.raw = spdk_mmio_read_8(&user_idxd->registers->wqcap.raw);

	SPDK_DEBUGLOG(idxd, "Total ring slots available 0x%x\n", wqcap.total_wq_size);

	idxd->total_wq_size = wqcap.total_wq_size;
	/* Spread the channels we allow per device based on the total number of
	 * WQ entries, to try to achieve optimal performance for common cases.
	 */
	idxd->chan_per_device = (idxd->total_wq_size >= 128) ? 8 : 4;
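	/* For example, a device exposing 128 or more ring slots is shared by up
	 * to 8 channels (16+ slots each); smaller devices are split 4 ways.
	 */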

	table_offsets.raw[0] = spdk_mmio_read_8(&user_idxd->registers->offsets.raw[0]);
	table_offsets.raw[1] = spdk_mmio_read_8(&user_idxd->registers->offsets.raw[1]);

	wqcfg = (union idxd_wqcfg *)((uint8_t *)user_idxd->registers + (table_offsets.wqcfg *
				     IDXD_TABLE_OFFSET_MULT));

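	/* Read-modify-write: snapshot the live WQCFG registers so any fields we
	 * do not explicitly set below are written back unchanged.
	 */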
	for (i = 0; i < SPDK_COUNTOF(wqcfg->raw); i++) {
		wqcfg->raw[i] = spdk_mmio_read_4(&wqcfg->raw[i]);
	}

	wqcfg->wq_size = wqcap.total_wq_size;
	wqcfg->mode = WQ_MODE_DEDICATED;
	wqcfg->max_batch_shift = user_idxd->registers->gencap.max_batch_shift;
	wqcfg->max_xfer_shift = LOG2_WQ_MAX_XFER;
	wqcfg->wq_state = WQ_ENABLED;
	wqcfg->priority = WQ_PRIORITY_1;

	idxd->batch_size = (1 << wqcfg->max_batch_shift);

	for (i = 0; i < SPDK_COUNTOF(wqcfg->raw); i++) {
		spdk_mmio_write_4(&wqcfg->raw[i], wqcfg->raw[i]);
	}

	return 0;
}

static int
idxd_device_configure(struct spdk_user_idxd_device *user_idxd)
{
	int rc = 0;
	union idxd_gensts_register gensts_reg;
	union idxd_cmd_register cmd = {};

	/*
	 * Map BAR0 (device MMIO registers) and BAR2 (work queue portals).
	 */
	rc = idxd_map_pci_bars(user_idxd);
	if (rc) {
		return rc;
	}

	/*
	 * Reset the device
	 */
	rc = idxd_reset_dev(user_idxd);
	if (rc) {
		goto err_reset;
	}

	/*
	 * Save the device version for use in the common library code.
	 */
	user_idxd->idxd.version = user_idxd->registers->version;

	/*
	 * Configure groups and work queues.
	 */
	rc = idxd_group_config(user_idxd);
	if (rc) {
		goto err_group_cfg;
	}

	rc = idxd_wq_config(user_idxd);
	if (rc) {
		goto err_wq_cfg;
	}

	/*
	 * Enable the device
	 */
	gensts_reg.raw = spdk_mmio_read_4(&user_idxd->registers->gensts.raw);
	assert(gensts_reg.state == IDXD_DEVICE_STATE_DISABLED);

	cmd.command_code = IDXD_ENABLE_DEV;

	spdk_mmio_write_4(&user_idxd->registers->cmd.raw, cmd.raw);
	rc = idxd_wait_cmd(user_idxd, IDXD_REGISTER_TIMEOUT_US);
	gensts_reg.raw = spdk_mmio_read_4(&user_idxd->registers->gensts.raw);
	if ((rc < 0) || (gensts_reg.state != IDXD_DEVICE_STATE_ENABLED)) {
		rc = -EINVAL;
		SPDK_ERRLOG("Error enabling device %d\n", rc);
		goto err_device_enable;
	}

	/*
	 * Enable the work queue that we've configured
	 */
	cmd.command_code = IDXD_ENABLE_WQ;
	cmd.operand = 0;

	spdk_mmio_write_4(&user_idxd->registers->cmd.raw, cmd.raw);
	rc = idxd_wait_cmd(user_idxd, IDXD_REGISTER_TIMEOUT_US);
	if (rc < 0) {
		SPDK_ERRLOG("Error enabling work queues %d\n", rc);
		goto err_wq_enable;
	}

	if ((rc == 0) && (gensts_reg.state == IDXD_DEVICE_STATE_ENABLED)) {
		SPDK_DEBUGLOG(idxd, "Device enabled VID 0x%x DID 0x%x\n",
			      user_idxd->device->id.vendor_id, user_idxd->device->id.device_id);
	}

	return rc;
err_wq_enable:
err_device_enable:
err_wq_cfg:
err_group_cfg:
err_reset:
	idxd_unmap_pci_bar(user_idxd, IDXD_MMIO_BAR);
	idxd_unmap_pci_bar(user_idxd, IDXD_WQ_BAR);

	return rc;
}

static void
user_idxd_device_destruct(struct spdk_idxd_device *idxd)
{
	struct spdk_user_idxd_device *user_idxd = __user_idxd(idxd);

	idxd_disable_dev(user_idxd);

	idxd_unmap_pci_bar(user_idxd, IDXD_MMIO_BAR);
	idxd_unmap_pci_bar(user_idxd, IDXD_WQ_BAR);

	spdk_pci_device_detach(user_idxd->device);
	if (idxd->type == IDXD_DEV_TYPE_IAA) {
		spdk_free(idxd->aecs);
	}
	free(user_idxd);
}

struct idxd_enum_ctx {
	spdk_idxd_probe_cb probe_cb;
	spdk_idxd_attach_cb attach_cb;
	void *cb_ctx;
};

static bool
probe_cb(void *cb_ctx, struct spdk_pci_device *pci_dev)
{
	struct spdk_pci_addr pci_addr __attribute__((unused));

	pci_addr = spdk_pci_device_get_addr(pci_dev);

	SPDK_DEBUGLOG(idxd,
		      " Found matching device at %04x:%02x:%02x.%x vendor:0x%04x device:0x%04x\n",
		      pci_addr.domain,
		      pci_addr.bus,
		      pci_addr.dev,
		      pci_addr.func,
		      spdk_pci_device_get_vendor_id(pci_dev),
		      spdk_pci_device_get_device_id(pci_dev));

	/* Claim the device in case of a conflict with another process. */
	if (spdk_pci_device_claim(pci_dev) < 0) {
		return false;
	}

	return true;
}

/* This function must only be called while holding g_driver_lock */
static int
idxd_enum_cb(void *ctx, struct spdk_pci_device *pci_dev)
{
	struct idxd_enum_ctx *enum_ctx = ctx;
	struct spdk_idxd_device *idxd;

	/* Call the user probe_cb to see if they want this device or not; if not,
	 * skip it with a positive return code.
	 */
	if (enum_ctx->probe_cb(enum_ctx->cb_ctx, pci_dev) == false) {
		return 1;
	}

	if (probe_cb(enum_ctx->cb_ctx, pci_dev)) {
		idxd = idxd_attach(pci_dev);
		if (idxd == NULL) {
			SPDK_ERRLOG("idxd_attach() failed\n");
			return -EINVAL;
		}

		enum_ctx->attach_cb(enum_ctx->cb_ctx, idxd);
	}

	return 0;
}

/* The IDXD driver supports 2 distinct HW units, DSA and IAA. */
static int
user_idxd_probe(void *cb_ctx, spdk_idxd_attach_cb attach_cb,
		spdk_idxd_probe_cb probe_cb)
{
	int rc;
	struct idxd_enum_ctx enum_ctx;

	enum_ctx.probe_cb = probe_cb;
	enum_ctx.attach_cb = attach_cb;
	enum_ctx.cb_ctx = cb_ctx;

	pthread_mutex_lock(&g_driver_lock);
	rc = spdk_pci_enumerate(spdk_pci_idxd_get_driver(), idxd_enum_cb, &enum_ctx);
	pthread_mutex_unlock(&g_driver_lock);
	assert(rc == 0);

	return rc;
}
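
/*
 * Minimal usage sketch for this entry point, assuming the common library's
 * spdk_idxd_probe() dispatches here through g_user_idxd_impl (the callback
 * names below are hypothetical):
 *
 *   static bool my_probe(void *ctx, struct spdk_pci_device *dev)
 *   {
 *           return true;  // accept every IDXD device found
 *   }
 *
 *   static void my_attach(void *ctx, struct spdk_idxd_device *idxd)
 *   {
 *           // stash idxd for later channel allocation
 *   }
 *
 *   spdk_idxd_probe(NULL, my_attach, my_probe);
 */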

static void
user_idxd_dump_sw_err(struct spdk_idxd_device *idxd, void *portal)
{
	struct spdk_user_idxd_device *user_idxd = __user_idxd(idxd);
	union idxd_swerr_register sw_err;
	uint16_t i;

	SPDK_NOTICELOG("SW Error Raw:");
	for (i = 0; i < 4; i++) {
		sw_err.raw[i] = spdk_mmio_read_8(&user_idxd->registers->sw_err.raw[i]);
		SPDK_NOTICELOG(" 0x%lx\n", sw_err.raw[i]);
	}

	SPDK_NOTICELOG("SW Error error code: %#x\n", (uint8_t)(sw_err.error));
	SPDK_NOTICELOG("SW Error WQ index: %u\n", (uint8_t)(sw_err.wq_idx));
	SPDK_NOTICELOG("SW Error Operation: %u\n", (uint8_t)(sw_err.operation));
}

static char *
user_idxd_portal_get_addr(struct spdk_idxd_device *idxd)
{
	return (char *)idxd->portal;
}

static struct spdk_idxd_impl g_user_idxd_impl = {
	.name = "user",
	.probe = user_idxd_probe,
	.destruct = user_idxd_device_destruct,
	.dump_sw_error = user_idxd_dump_sw_err,
	.portal_get_addr = user_idxd_portal_get_addr
};

/*
 * Fixed Huffman tables the IAA hardware requires to implement RFC 1951.
 */
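/*
 * Each table entry appears to pack the Huffman code in bits [14:0] and its
 * bit length in bits [19:15]. For example, 0x40030 is the 8-bit code
 * 0b00110000 (0x30) for literal/length symbol 0, matching the fixed code
 * table in RFC 1951 section 3.2.6.
 */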
const uint32_t fixed_ll_sym[286] = {
	0x40030, 0x40031, 0x40032, 0x40033, 0x40034, 0x40035, 0x40036, 0x40037,
	0x40038, 0x40039, 0x4003A, 0x4003B, 0x4003C, 0x4003D, 0x4003E, 0x4003F,
	0x40040, 0x40041, 0x40042, 0x40043, 0x40044, 0x40045, 0x40046, 0x40047,
	0x40048, 0x40049, 0x4004A, 0x4004B, 0x4004C, 0x4004D, 0x4004E, 0x4004F,
	0x40050, 0x40051, 0x40052, 0x40053, 0x40054, 0x40055, 0x40056, 0x40057,
	0x40058, 0x40059, 0x4005A, 0x4005B, 0x4005C, 0x4005D, 0x4005E, 0x4005F,
	0x40060, 0x40061, 0x40062, 0x40063, 0x40064, 0x40065, 0x40066, 0x40067,
	0x40068, 0x40069, 0x4006A, 0x4006B, 0x4006C, 0x4006D, 0x4006E, 0x4006F,
	0x40070, 0x40071, 0x40072, 0x40073, 0x40074, 0x40075, 0x40076, 0x40077,
	0x40078, 0x40079, 0x4007A, 0x4007B, 0x4007C, 0x4007D, 0x4007E, 0x4007F,
	0x40080, 0x40081, 0x40082, 0x40083, 0x40084, 0x40085, 0x40086, 0x40087,
	0x40088, 0x40089, 0x4008A, 0x4008B, 0x4008C, 0x4008D, 0x4008E, 0x4008F,
	0x40090, 0x40091, 0x40092, 0x40093, 0x40094, 0x40095, 0x40096, 0x40097,
	0x40098, 0x40099, 0x4009A, 0x4009B, 0x4009C, 0x4009D, 0x4009E, 0x4009F,
	0x400A0, 0x400A1, 0x400A2, 0x400A3, 0x400A4, 0x400A5, 0x400A6, 0x400A7,
	0x400A8, 0x400A9, 0x400AA, 0x400AB, 0x400AC, 0x400AD, 0x400AE, 0x400AF,
	0x400B0, 0x400B1, 0x400B2, 0x400B3, 0x400B4, 0x400B5, 0x400B6, 0x400B7,
	0x400B8, 0x400B9, 0x400BA, 0x400BB, 0x400BC, 0x400BD, 0x400BE, 0x400BF,
	0x48190, 0x48191, 0x48192, 0x48193, 0x48194, 0x48195, 0x48196, 0x48197,
	0x48198, 0x48199, 0x4819A, 0x4819B, 0x4819C, 0x4819D, 0x4819E, 0x4819F,
	0x481A0, 0x481A1, 0x481A2, 0x481A3, 0x481A4, 0x481A5, 0x481A6, 0x481A7,
	0x481A8, 0x481A9, 0x481AA, 0x481AB, 0x481AC, 0x481AD, 0x481AE, 0x481AF,
	0x481B0, 0x481B1, 0x481B2, 0x481B3, 0x481B4, 0x481B5, 0x481B6, 0x481B7,
	0x481B8, 0x481B9, 0x481BA, 0x481BB, 0x481BC, 0x481BD, 0x481BE, 0x481BF,
	0x481C0, 0x481C1, 0x481C2, 0x481C3, 0x481C4, 0x481C5, 0x481C6, 0x481C7,
	0x481C8, 0x481C9, 0x481CA, 0x481CB, 0x481CC, 0x481CD, 0x481CE, 0x481CF,
	0x481D0, 0x481D1, 0x481D2, 0x481D3, 0x481D4, 0x481D5, 0x481D6, 0x481D7,
	0x481D8, 0x481D9, 0x481DA, 0x481DB, 0x481DC, 0x481DD, 0x481DE, 0x481DF,
	0x481E0, 0x481E1, 0x481E2, 0x481E3, 0x481E4, 0x481E5, 0x481E6, 0x481E7,
	0x481E8, 0x481E9, 0x481EA, 0x481EB, 0x481EC, 0x481ED, 0x481EE, 0x481EF,
	0x481F0, 0x481F1, 0x481F2, 0x481F3, 0x481F4, 0x481F5, 0x481F6, 0x481F7,
	0x481F8, 0x481F9, 0x481FA, 0x481FB, 0x481FC, 0x481FD, 0x481FE, 0x481FF,
	0x38000, 0x38001, 0x38002, 0x38003, 0x38004, 0x38005, 0x38006, 0x38007,
	0x38008, 0x38009, 0x3800A, 0x3800B, 0x3800C, 0x3800D, 0x3800E, 0x3800F,
	0x38010, 0x38011, 0x38012, 0x38013, 0x38014, 0x38015, 0x38016, 0x38017,
	0x400C0, 0x400C1, 0x400C2, 0x400C3, 0x400C4, 0x400C5
};

const uint32_t fixed_d_sym[30] = {
	0x28000, 0x28001, 0x28002, 0x28003, 0x28004, 0x28005, 0x28006, 0x28007,
	0x28008, 0x28009, 0x2800A, 0x2800B, 0x2800C, 0x2800D, 0x2800E, 0x2800F,
	0x28010, 0x28011, 0x28012, 0x28013, 0x28014, 0x28015, 0x28016, 0x28017,
	0x28018, 0x28019, 0x2801A, 0x2801B, 0x2801C, 0x2801D
};
#define DYNAMIC_HDR		0x2
#define DYNAMIC_HDR_SIZE	3

/* Caller must hold g_driver_lock */
static struct spdk_idxd_device *
idxd_attach(struct spdk_pci_device *device)
{
	struct spdk_user_idxd_device *user_idxd;
	struct spdk_idxd_device *idxd;
	uint16_t did = device->id.device_id;
	uint32_t cmd_reg;
	uint64_t updated = sizeof(struct iaa_aecs);
	int rc;

	user_idxd = calloc(1, sizeof(struct spdk_user_idxd_device));
	if (user_idxd == NULL) {
		SPDK_ERRLOG("Failed to allocate memory for user_idxd device.\n");
		return NULL;
	}

	idxd = &user_idxd->idxd;
	if (did == PCI_DEVICE_ID_INTEL_DSA) {
		idxd->type = IDXD_DEV_TYPE_DSA;
	} else if (did == PCI_DEVICE_ID_INTEL_IAA) {
		idxd->type = IDXD_DEV_TYPE_IAA;
		idxd->aecs = spdk_zmalloc(sizeof(struct iaa_aecs),
					  0x20, NULL,
					  SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
		if (idxd->aecs == NULL) {
			SPDK_ERRLOG("Failed to allocate iaa aecs\n");
			goto err;
		}

		idxd->aecs_addr = spdk_vtophys((void *)idxd->aecs, &updated);
		if (idxd->aecs_addr == SPDK_VTOPHYS_ERROR || updated < sizeof(struct iaa_aecs)) {
			SPDK_ERRLOG("Failed to translate iaa aecs\n");
			spdk_free(idxd->aecs);
			idxd->aecs = NULL; /* avoid a double free in user_idxd_device_destruct() */
			goto err;
		}

		/* Configure the AECS table to use the fixed Huffman tables. */
		idxd->aecs->output_accum[0] = DYNAMIC_HDR | 1;
		idxd->aecs->num_output_accum_bits = DYNAMIC_HDR_SIZE;

		/* Add the Huffman tables to the AECS. */
		memcpy(idxd->aecs->ll_sym, fixed_ll_sym, sizeof(fixed_ll_sym));
		memcpy(idxd->aecs->d_sym, fixed_d_sym, sizeof(fixed_d_sym));
	}

	user_idxd->device = device;
	idxd->impl = &g_user_idxd_impl;
	idxd->socket_id = device->socket_id;
	pthread_mutex_init(&idxd->num_channels_lock, NULL);

	/* Enable PCI busmaster: set the Bus Master Enable bit (bit 2) of the
	 * PCI command register at config offset 4.
	 */
	spdk_pci_device_cfg_read32(device, &cmd_reg, 4);
	cmd_reg |= 0x4;
	spdk_pci_device_cfg_write32(device, cmd_reg, 4);

	rc = idxd_device_configure(user_idxd);
	if (rc) {
		goto err;
	}

	return idxd;
err:
	user_idxd_device_destruct(idxd);
	return NULL;
}

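/* Register this implementation with the common idxd library; the macro
 * presumably adds g_user_idxd_impl to the library's list of backends at
 * load time.
 */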
SPDK_IDXD_IMPL_REGISTER(user, &g_user_idxd_impl);