/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2021 Intel Corporation.
 * All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/env.h"
#include "spdk/util.h"
#include "spdk/memory.h"
#include "spdk/likely.h"

#include "spdk/log.h"
#include "spdk_internal/idxd.h"

#include "idxd_internal.h"

struct spdk_user_idxd_device {
	struct spdk_idxd_device idxd;
	struct spdk_pci_device *device;
	int sock_id;
	struct idxd_registers *registers;
};

#define __user_idxd(idxd) (struct spdk_user_idxd_device *)idxd

pthread_mutex_t g_driver_lock = PTHREAD_MUTEX_INITIALIZER;

static struct spdk_idxd_device *idxd_attach(struct spdk_pci_device *device);

/* Used for control commands, not for descriptor submission. */
static int
idxd_wait_cmd(struct spdk_user_idxd_device *user_idxd, int _timeout)
{
	uint32_t timeout = _timeout;
	union idxd_cmdsts_register cmd_status = {};

	cmd_status.raw = spdk_mmio_read_4(&user_idxd->registers->cmdsts.raw);
	while (cmd_status.active && --timeout) {
		usleep(1);
		cmd_status.raw = spdk_mmio_read_4(&user_idxd->registers->cmdsts.raw);
	}

	/* Check for timeout */
	if (timeout == 0 && cmd_status.active) {
		SPDK_ERRLOG("Command timeout, waited %u\n", _timeout);
		return -EBUSY;
	}

	/* Check for error */
	if (cmd_status.err) {
		SPDK_ERRLOG("Command status reg reports error 0x%x\n", cmd_status.err);
		return -EINVAL;
	}

	return 0;
}

static int
idxd_unmap_pci_bar(struct spdk_user_idxd_device *user_idxd, int bar)
{
	int rc = 0;
	void *addr = NULL;

	if (bar == IDXD_MMIO_BAR) {
		addr = (void *)user_idxd->registers;
	} else if (bar == IDXD_WQ_BAR) {
		addr = (void *)user_idxd->idxd.portal;
	}

	if (addr) {
		rc = spdk_pci_device_unmap_bar(user_idxd->device, 0, addr);
	}
	return rc;
}

static int
idxd_map_pci_bars(struct spdk_user_idxd_device *user_idxd)
{
	int rc;
	void *addr;
	uint64_t phys_addr, size;

	rc = spdk_pci_device_map_bar(user_idxd->device, IDXD_MMIO_BAR, &addr, &phys_addr, &size);
	if (rc != 0 || addr == NULL) {
		SPDK_ERRLOG("pci_device_map_range failed with error code %d\n", rc);
		return -1;
	}
	user_idxd->registers = (struct idxd_registers *)addr;

	rc = spdk_pci_device_map_bar(user_idxd->device, IDXD_WQ_BAR, &addr, &phys_addr, &size);
	if (rc != 0 || addr == NULL) {
		SPDK_ERRLOG("pci_device_map_range failed with error code %d\n", rc);
		rc = idxd_unmap_pci_bar(user_idxd, IDXD_MMIO_BAR);
		if (rc) {
			SPDK_ERRLOG("unable to unmap MMIO bar\n");
		}
		return -EINVAL;
	}
	user_idxd->idxd.portal = addr;

	return 0;
}

static void
idxd_disable_dev(struct spdk_user_idxd_device *user_idxd)
{
	int rc;
	union idxd_cmd_register cmd = {};

	cmd.command_code = IDXD_DISABLE_DEV;

	assert(&user_idxd->registers->cmd.raw); /* scan-build */
	spdk_mmio_write_4(&user_idxd->registers->cmd.raw, cmd.raw);
	rc = idxd_wait_cmd(user_idxd, IDXD_REGISTER_TIMEOUT_US);
	if (rc < 0) {
		SPDK_ERRLOG("Error disabling device %d\n", rc);
	}
}

static int
idxd_reset_dev(struct spdk_user_idxd_device *user_idxd)
{
	int rc;
	union idxd_cmd_register cmd = {};

	cmd.command_code = IDXD_RESET_DEVICE;

	spdk_mmio_write_4(&user_idxd->registers->cmd.raw, cmd.raw);
	rc = idxd_wait_cmd(user_idxd, IDXD_REGISTER_TIMEOUT_US);
	if (rc < 0) {
		SPDK_ERRLOG("Error resetting device %d\n", rc);
	}

	return rc;
}
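
/*
 * idxd_group_config() below builds the simplest topology the hardware
 * supports: one group that owns every engine and a single dedicated work
 * queue. A sketch of the result (engine and group counts come from the
 * engine/group capability registers at runtime):
 *
 *	group 0: engines 0..num_engines-1 -> WQ 0 (dedicated)
 *	groups 1..num_groups-1: explicitly zeroed
 */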

static int
idxd_group_config(struct spdk_user_idxd_device *user_idxd)
{
	int i;
	union idxd_groupcap_register groupcap;
	union idxd_enginecap_register enginecap;
	union idxd_wqcap_register wqcap;
	union idxd_offsets_register table_offsets;

	struct idxd_grptbl *grptbl;
	struct idxd_grpcfg grpcfg = {};

	groupcap.raw = spdk_mmio_read_8(&user_idxd->registers->groupcap.raw);
	enginecap.raw = spdk_mmio_read_8(&user_idxd->registers->enginecap.raw);
	wqcap.raw = spdk_mmio_read_8(&user_idxd->registers->wqcap.raw);

	if (wqcap.num_wqs < 1) {
		return -ENOTSUP;
	}

	/* Build one group with all of the engines and a single work queue. */
	grpcfg.wqs[0] = 1;
	grpcfg.flags.read_buffers_allowed = groupcap.read_bufs;
	for (i = 0; i < enginecap.num_engines; i++) {
		grpcfg.engines |= (1 << i);
	}

	table_offsets.raw[0] = spdk_mmio_read_8(&user_idxd->registers->offsets.raw[0]);
	table_offsets.raw[1] = spdk_mmio_read_8(&user_idxd->registers->offsets.raw[1]);

	grptbl = (struct idxd_grptbl *)((uint8_t *)user_idxd->registers + (table_offsets.grpcfg *
					IDXD_TABLE_OFFSET_MULT));

	/* Write the group we've configured */
	spdk_mmio_write_8(&grptbl->group[0].wqs[0], grpcfg.wqs[0]);
	spdk_mmio_write_8(&grptbl->group[0].wqs[1], 0);
	spdk_mmio_write_8(&grptbl->group[0].wqs[2], 0);
	spdk_mmio_write_8(&grptbl->group[0].wqs[3], 0);
	spdk_mmio_write_8(&grptbl->group[0].engines, grpcfg.engines);
	spdk_mmio_write_4(&grptbl->group[0].flags.raw, grpcfg.flags.raw);

	/* Write zeroes to the rest of the groups */
	for (i = 1 ; i < groupcap.num_groups; i++) {
		spdk_mmio_write_8(&grptbl->group[i].wqs[0], 0L);
		spdk_mmio_write_8(&grptbl->group[i].wqs[1], 0L);
		spdk_mmio_write_8(&grptbl->group[i].wqs[2], 0L);
		spdk_mmio_write_8(&grptbl->group[i].wqs[3], 0L);
		spdk_mmio_write_8(&grptbl->group[i].engines, 0L);
		spdk_mmio_write_4(&grptbl->group[i].flags.raw, 0L);
	}

	return 0;
}

static int
idxd_wq_config(struct spdk_user_idxd_device *user_idxd)
{
	uint32_t i;
	struct spdk_idxd_device *idxd = &user_idxd->idxd;
	union idxd_wqcap_register wqcap;
	union idxd_offsets_register table_offsets;
	union idxd_wqcfg *wqcfg;

	wqcap.raw = spdk_mmio_read_8(&user_idxd->registers->wqcap.raw);

	SPDK_DEBUGLOG(idxd, "Total ring slots available 0x%x\n", wqcap.total_wq_size);

	idxd->total_wq_size = wqcap.total_wq_size;
	/* Spread the channels we allow per device based on the total number of WQE to try
	 * and achieve optimal performance for common cases.
	 */
	idxd->chan_per_device = (idxd->total_wq_size >= 128) ? 8 : 4;

	table_offsets.raw[0] = spdk_mmio_read_8(&user_idxd->registers->offsets.raw[0]);
	table_offsets.raw[1] = spdk_mmio_read_8(&user_idxd->registers->offsets.raw[1]);

	wqcfg = (union idxd_wqcfg *)((uint8_t *)user_idxd->registers + (table_offsets.wqcfg *
				     IDXD_TABLE_OFFSET_MULT));

	for (i = 0 ; i < SPDK_COUNTOF(wqcfg->raw); i++) {
		wqcfg->raw[i] = spdk_mmio_read_4(&wqcfg->raw[i]);
	}

	wqcfg->wq_size = wqcap.total_wq_size;
	wqcfg->mode = WQ_MODE_DEDICATED;
	wqcfg->max_batch_shift = LOG2_WQ_MAX_BATCH;
	wqcfg->max_xfer_shift = LOG2_WQ_MAX_XFER;
	wqcfg->wq_state = WQ_ENABLED;
	wqcfg->priority = WQ_PRIORITY_1;

	for (i = 0; i < SPDK_COUNTOF(wqcfg->raw); i++) {
		spdk_mmio_write_4(&wqcfg->raw[i], wqcfg->raw[i]);
	}

	return 0;
}
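
/*
 * idxd_device_configure() runs the full bring-up sequence: map BAR0/BAR2,
 * reset the device, write the group and work queue configuration while the
 * device is still disabled, then issue IDXD_ENABLE_DEV followed by
 * IDXD_ENABLE_WQ. Failures after the BARs are mapped unwind by unmapping
 * both of them.
 */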

static int
idxd_device_configure(struct spdk_user_idxd_device *user_idxd)
{
	int rc = 0;
	union idxd_gensts_register gensts_reg;
	union idxd_cmd_register cmd = {};

	/*
	 * Map BAR0 and BAR2
	 */
	rc = idxd_map_pci_bars(user_idxd);
	if (rc) {
		return rc;
	}

	/*
	 * Reset the device
	 */
	rc = idxd_reset_dev(user_idxd);
	if (rc) {
		goto err_reset;
	}

	/*
	 * Save the device version for use in the common library code.
	 */
	user_idxd->idxd.version = user_idxd->registers->version;

	/*
	 * Configure groups and work queues.
	 */
	rc = idxd_group_config(user_idxd);
	if (rc) {
		goto err_group_cfg;
	}

	rc = idxd_wq_config(user_idxd);
	if (rc) {
		goto err_wq_cfg;
	}

	/*
	 * Enable the device
	 */
	gensts_reg.raw = spdk_mmio_read_4(&user_idxd->registers->gensts.raw);
	assert(gensts_reg.state == IDXD_DEVICE_STATE_DISABLED);

	cmd.command_code = IDXD_ENABLE_DEV;

	spdk_mmio_write_4(&user_idxd->registers->cmd.raw, cmd.raw);
	rc = idxd_wait_cmd(user_idxd, IDXD_REGISTER_TIMEOUT_US);
	gensts_reg.raw = spdk_mmio_read_4(&user_idxd->registers->gensts.raw);
	if ((rc < 0) || (gensts_reg.state != IDXD_DEVICE_STATE_ENABLED)) {
		rc = -EINVAL;
		SPDK_ERRLOG("Error enabling device %d\n", rc);
		goto err_device_enable;
	}

	/*
	 * Enable the work queue that we've configured
	 */
	cmd.command_code = IDXD_ENABLE_WQ;
	cmd.operand = 0;

	spdk_mmio_write_4(&user_idxd->registers->cmd.raw, cmd.raw);
	rc = idxd_wait_cmd(user_idxd, IDXD_REGISTER_TIMEOUT_US);
	if (rc < 0) {
		SPDK_ERRLOG("Error enabling work queues %d\n", rc);
		goto err_wq_enable;
	}

	if ((rc == 0) && (gensts_reg.state == IDXD_DEVICE_STATE_ENABLED)) {
		SPDK_DEBUGLOG(idxd, "Device enabled VID 0x%x DID 0x%x\n",
			      user_idxd->device->id.vendor_id, user_idxd->device->id.device_id);
	}

	return rc;
err_wq_enable:
err_device_enable:
err_wq_cfg:
err_group_cfg:
err_reset:
	idxd_unmap_pci_bar(user_idxd, IDXD_MMIO_BAR);
	idxd_unmap_pci_bar(user_idxd, IDXD_WQ_BAR);

	return rc;
}

static void
user_idxd_device_destruct(struct spdk_idxd_device *idxd)
{
	struct spdk_user_idxd_device *user_idxd = __user_idxd(idxd);

	idxd_disable_dev(user_idxd);

	idxd_unmap_pci_bar(user_idxd, IDXD_MMIO_BAR);
	idxd_unmap_pci_bar(user_idxd, IDXD_WQ_BAR);

	spdk_pci_device_detach(user_idxd->device);
	if (idxd->type == IDXD_DEV_TYPE_IAA) {
		spdk_free(idxd->aecs);
	}
	free(user_idxd);
}

struct idxd_enum_ctx {
	spdk_idxd_probe_cb probe_cb;
	spdk_idxd_attach_cb attach_cb;
	void *cb_ctx;
};
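
/*
 * Illustrative use of the callback pair carried by idxd_enum_ctx, via the
 * public spdk_idxd_probe() entry point (my_probe/my_attach are hypothetical
 * caller names, not part of this driver):
 *
 *	static bool
 *	my_probe(void *cb_ctx, struct spdk_pci_device *dev)
 *	{
 *		return true;	// accept every IDXD device offered
 *	}
 *
 *	static void
 *	my_attach(void *cb_ctx, struct spdk_idxd_device *idxd)
 *	{
 *		// stash idxd; channels are created against it later
 *	}
 *
 *	rc = spdk_idxd_probe(NULL, my_attach, my_probe);
 */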

static bool
probe_cb(void *cb_ctx, struct spdk_pci_device *pci_dev)
{
	struct spdk_pci_addr pci_addr __attribute__((unused));

	pci_addr = spdk_pci_device_get_addr(pci_dev);

	SPDK_DEBUGLOG(idxd,
		      " Found matching device at %04x:%02x:%02x.%x vendor:0x%04x device:0x%04x\n",
		      pci_addr.domain,
		      pci_addr.bus,
		      pci_addr.dev,
		      pci_addr.func,
		      spdk_pci_device_get_vendor_id(pci_dev),
		      spdk_pci_device_get_device_id(pci_dev));

	/* Claim the device in case of a conflict with another process. */
	if (spdk_pci_device_claim(pci_dev) < 0) {
		return false;
	}

	return true;
}

/* This function must only be called while holding g_driver_lock */
static int
idxd_enum_cb(void *ctx, struct spdk_pci_device *pci_dev)
{
	struct idxd_enum_ctx *enum_ctx = ctx;
	struct spdk_idxd_device *idxd;

	/* Call the user probe_cb to see if they want this device or not; if not,
	 * skip it with a positive return code.
	 */
	if (enum_ctx->probe_cb(enum_ctx->cb_ctx, pci_dev) == false) {
		return 1;
	}

	if (probe_cb(enum_ctx->cb_ctx, pci_dev)) {
		idxd = idxd_attach(pci_dev);
		if (idxd == NULL) {
			SPDK_ERRLOG("idxd_attach() failed\n");
			return -EINVAL;
		}

		enum_ctx->attach_cb(enum_ctx->cb_ctx, idxd);
	}

	return 0;
}

/* The IDXD driver supports 2 distinct HW units, DSA and IAA. */
static int
user_idxd_probe(void *cb_ctx, spdk_idxd_attach_cb attach_cb,
		spdk_idxd_probe_cb probe_cb)
{
	int rc;
	struct idxd_enum_ctx enum_ctx;

	enum_ctx.probe_cb = probe_cb;
	enum_ctx.attach_cb = attach_cb;
	enum_ctx.cb_ctx = cb_ctx;

	pthread_mutex_lock(&g_driver_lock);
	rc = spdk_pci_enumerate(spdk_pci_idxd_get_driver(), idxd_enum_cb, &enum_ctx);
	pthread_mutex_unlock(&g_driver_lock);

	return rc;
}

static void
user_idxd_dump_sw_err(struct spdk_idxd_device *idxd, void *portal)
{
	struct spdk_user_idxd_device *user_idxd = __user_idxd(idxd);
	union idxd_swerr_register sw_err;
	uint16_t i;

	SPDK_NOTICELOG("SW Error Raw:");
	for (i = 0; i < 4; i++) {
		sw_err.raw[i] = spdk_mmio_read_8(&user_idxd->registers->sw_err.raw[i]);
		SPDK_NOTICELOG("    0x%lx\n", sw_err.raw[i]);
	}

	SPDK_NOTICELOG("SW Error error code: %#x\n", (uint8_t)(sw_err.error));
	SPDK_NOTICELOG("SW Error WQ index: %u\n", (uint8_t)(sw_err.wq_idx));
	SPDK_NOTICELOG("SW Error Operation: %u\n", (uint8_t)(sw_err.operation));
}

static char *
user_idxd_portal_get_addr(struct spdk_idxd_device *idxd)
{
	return (char *)idxd->portal;
}

static struct spdk_idxd_impl g_user_idxd_impl = {
	.name			= "user",
	.probe			= user_idxd_probe,
	.destruct		= user_idxd_device_destruct,
	.dump_sw_error		= user_idxd_dump_sw_err,
	.portal_get_addr	= user_idxd_portal_get_addr
};

/*
 * Fixed Huffman tables the IAA hardware requires to implement RFC-1951.
 */
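/*
 * Each entry appears to pack one canonical Huffman code as
 * (code_length << 15) | code, consistent with the RFC 1951 fixed tables:
 * 0x40030 is the 8-bit code 0x30 for literal/length symbol 0, 0x48190 the
 * 9-bit code 0x190 for symbol 144, 0x38000 the 7-bit code 0x0 for symbol
 * 256, and 0x28000 the 5-bit code 0x0 for distance symbol 0. (Layout
 * inferred from the values themselves, not taken from a hardware spec.)
 */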
const uint32_t fixed_ll_sym[286] = {
	0x40030, 0x40031, 0x40032, 0x40033, 0x40034, 0x40035, 0x40036, 0x40037,
	0x40038, 0x40039, 0x4003A, 0x4003B, 0x4003C, 0x4003D, 0x4003E, 0x4003F,
	0x40040, 0x40041, 0x40042, 0x40043, 0x40044, 0x40045, 0x40046, 0x40047,
	0x40048, 0x40049, 0x4004A, 0x4004B, 0x4004C, 0x4004D, 0x4004E, 0x4004F,
	0x40050, 0x40051, 0x40052, 0x40053, 0x40054, 0x40055, 0x40056, 0x40057,
	0x40058, 0x40059, 0x4005A, 0x4005B, 0x4005C, 0x4005D, 0x4005E, 0x4005F,
	0x40060, 0x40061, 0x40062, 0x40063, 0x40064, 0x40065, 0x40066, 0x40067,
	0x40068, 0x40069, 0x4006A, 0x4006B, 0x4006C, 0x4006D, 0x4006E, 0x4006F,
	0x40070, 0x40071, 0x40072, 0x40073, 0x40074, 0x40075, 0x40076, 0x40077,
	0x40078, 0x40079, 0x4007A, 0x4007B, 0x4007C, 0x4007D, 0x4007E, 0x4007F,
	0x40080, 0x40081, 0x40082, 0x40083, 0x40084, 0x40085, 0x40086, 0x40087,
	0x40088, 0x40089, 0x4008A, 0x4008B, 0x4008C, 0x4008D, 0x4008E, 0x4008F,
	0x40090, 0x40091, 0x40092, 0x40093, 0x40094, 0x40095, 0x40096, 0x40097,
	0x40098, 0x40099, 0x4009A, 0x4009B, 0x4009C, 0x4009D, 0x4009E, 0x4009F,
	0x400A0, 0x400A1, 0x400A2, 0x400A3, 0x400A4, 0x400A5, 0x400A6, 0x400A7,
	0x400A8, 0x400A9, 0x400AA, 0x400AB, 0x400AC, 0x400AD, 0x400AE, 0x400AF,
	0x400B0, 0x400B1, 0x400B2, 0x400B3, 0x400B4, 0x400B5, 0x400B6, 0x400B7,
	0x400B8, 0x400B9, 0x400BA, 0x400BB, 0x400BC, 0x400BD, 0x400BE, 0x400BF,
	0x48190, 0x48191, 0x48192, 0x48193, 0x48194, 0x48195, 0x48196, 0x48197,
	0x48198, 0x48199, 0x4819A, 0x4819B, 0x4819C, 0x4819D, 0x4819E, 0x4819F,
	0x481A0, 0x481A1, 0x481A2, 0x481A3, 0x481A4, 0x481A5, 0x481A6, 0x481A7,
	0x481A8, 0x481A9, 0x481AA, 0x481AB, 0x481AC, 0x481AD, 0x481AE, 0x481AF,
	0x481B0, 0x481B1, 0x481B2, 0x481B3, 0x481B4, 0x481B5, 0x481B6, 0x481B7,
	0x481B8, 0x481B9, 0x481BA, 0x481BB, 0x481BC, 0x481BD, 0x481BE, 0x481BF,
	0x481C0, 0x481C1, 0x481C2, 0x481C3, 0x481C4, 0x481C5, 0x481C6, 0x481C7,
	0x481C8, 0x481C9, 0x481CA, 0x481CB, 0x481CC, 0x481CD, 0x481CE, 0x481CF,
	0x481D0, 0x481D1, 0x481D2, 0x481D3, 0x481D4, 0x481D5, 0x481D6, 0x481D7,
	0x481D8, 0x481D9, 0x481DA, 0x481DB, 0x481DC, 0x481DD, 0x481DE, 0x481DF,
	0x481E0, 0x481E1, 0x481E2, 0x481E3, 0x481E4, 0x481E5, 0x481E6, 0x481E7,
	0x481E8, 0x481E9, 0x481EA, 0x481EB, 0x481EC, 0x481ED, 0x481EE, 0x481EF,
	0x481F0, 0x481F1, 0x481F2, 0x481F3, 0x481F4, 0x481F5, 0x481F6, 0x481F7,
	0x481F8, 0x481F9, 0x481FA, 0x481FB, 0x481FC, 0x481FD, 0x481FE, 0x481FF,
	0x38000, 0x38001, 0x38002, 0x38003, 0x38004, 0x38005, 0x38006, 0x38007,
	0x38008, 0x38009, 0x3800A, 0x3800B, 0x3800C, 0x3800D, 0x3800E, 0x3800F,
	0x38010, 0x38011, 0x38012, 0x38013, 0x38014, 0x38015, 0x38016, 0x38017,
	0x400C0, 0x400C1, 0x400C2, 0x400C3, 0x400C4, 0x400C5
};

const uint32_t fixed_d_sym[30] = {
	0x28000, 0x28001, 0x28002, 0x28003, 0x28004, 0x28005, 0x28006, 0x28007,
	0x28008, 0x28009, 0x2800A, 0x2800B, 0x2800C, 0x2800D, 0x2800E, 0x2800F,
	0x28010, 0x28011, 0x28012, 0x28013, 0x28014, 0x28015, 0x28016, 0x28017,
	0x28018, 0x28019, 0x2801A, 0x2801B, 0x2801C, 0x2801D
};
#define DYNAMIC_HDR		0x2
#define DYNAMIC_HDR_SIZE	3
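
/*
 * DYNAMIC_HDR and DYNAMIC_HDR_SIZE seed the IAA output accumulator in
 * idxd_attach() below: (DYNAMIC_HDR | 1) is written as a 3-bit value, i.e. a
 * DEFLATE block header with BFINAL in the low bit (RFC 1951, section 3.2.3).
 * The exact bit ordering is dictated by the IAA AECS format, so treat this
 * reading as an interpretation rather than a statement of the spec.
 */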

/* Caller must hold g_driver_lock */
static struct spdk_idxd_device *
idxd_attach(struct spdk_pci_device *device)
{
	struct spdk_user_idxd_device *user_idxd;
	struct spdk_idxd_device *idxd;
	uint16_t did = device->id.device_id;
	uint32_t cmd_reg;
	int rc;

	user_idxd = calloc(1, sizeof(struct spdk_user_idxd_device));
	if (user_idxd == NULL) {
		SPDK_ERRLOG("Failed to allocate memory for user_idxd device.\n");
		return NULL;
	}

	idxd = &user_idxd->idxd;
	if (did == PCI_DEVICE_ID_INTEL_DSA) {
		idxd->type = IDXD_DEV_TYPE_DSA;
	} else if (did == PCI_DEVICE_ID_INTEL_IAA) {
		idxd->type = IDXD_DEV_TYPE_IAA;
		idxd->aecs = spdk_zmalloc(sizeof(struct iaa_aecs),
					  0x20, NULL,
					  SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
		if (idxd->aecs == NULL) {
			SPDK_ERRLOG("Failed to allocate iaa aecs\n");
			goto err;
		}
		/* Configure aecs table using fixed Huffman table */
		idxd->aecs->output_accum[0] = DYNAMIC_HDR | 1;
		idxd->aecs->num_output_accum_bits = DYNAMIC_HDR_SIZE;

		/* Add Huffman table to aecs */
		memcpy(idxd->aecs->ll_sym, fixed_ll_sym, sizeof(fixed_ll_sym));
		memcpy(idxd->aecs->d_sym, fixed_d_sym, sizeof(fixed_d_sym));
	}

	user_idxd->device = device;
	idxd->impl = &g_user_idxd_impl;
	idxd->socket_id = device->socket_id;
	pthread_mutex_init(&idxd->num_channels_lock, NULL);

	/* Enable PCI busmaster. */
	spdk_pci_device_cfg_read32(device, &cmd_reg, 4);
	cmd_reg |= 0x4;
	spdk_pci_device_cfg_write32(device, cmd_reg, 4);

	rc = idxd_device_configure(user_idxd);
	if (rc) {
		goto err;
	}

	return idxd;
err:
	user_idxd_device_destruct(idxd);
	return NULL;
}

SPDK_IDXD_IMPL_REGISTER(user, &g_user_idxd_impl);
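
/*
 * SPDK_IDXD_IMPL_REGISTER() hooks g_user_idxd_impl into the idxd library's
 * implementation list at load time (the macro expands to a constructor
 * function). Applications typically pick between this user-space impl and
 * the kernel one with spdk_idxd_set_config() before calling spdk_idxd_probe().
 */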