1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2018-2020 Intel Corporation 3 */ 4 5 #include <rte_string_fns.h> 6 #include <rte_devargs.h> 7 #include <ctype.h> 8 9 #include "qat_device.h" 10 #include "adf_transport_access_macros.h" 11 #include "qat_sym_pmd.h" 12 #include "qat_comp_pmd.h" 13 #include "adf_pf2vf_msg.h" 14 #include "qat_pf2vf.h" 15 16 /* Hardware device information per generation */ 17 struct qat_gen_hw_data qat_gen_config[QAT_N_GENS]; 18 struct qat_dev_hw_spec_funcs *qat_dev_hw_spec[QAT_N_GENS]; 19 20 /* per-process array of device data */ 21 struct qat_device_info qat_pci_devs[RTE_PMD_QAT_MAX_PCI_DEVICES]; 22 static int qat_nb_pci_devices; 23 24 /* 25 * The set of PCI devices this driver supports 26 */ 27 28 static const struct rte_pci_id pci_id_qat_map[] = { 29 { 30 RTE_PCI_DEVICE(0x8086, 0x0443), 31 }, 32 { 33 RTE_PCI_DEVICE(0x8086, 0x37c9), 34 }, 35 { 36 RTE_PCI_DEVICE(0x8086, 0x19e3), 37 }, 38 { 39 RTE_PCI_DEVICE(0x8086, 0x6f55), 40 }, 41 { 42 RTE_PCI_DEVICE(0x8086, 0x18ef), 43 }, 44 { 45 RTE_PCI_DEVICE(0x8086, 0x18a1), 46 }, 47 { 48 RTE_PCI_DEVICE(0x8086, 0x4941), 49 }, 50 {.device_id = 0}, 51 }; 52 53 static int 54 qat_pci_get_extra_size(enum qat_device_gen qat_dev_gen) 55 { 56 struct qat_dev_hw_spec_funcs *ops_hw = 57 qat_dev_hw_spec[qat_dev_gen]; 58 RTE_FUNC_PTR_OR_ERR_RET(ops_hw->qat_dev_get_extra_size, 59 -ENOTSUP); 60 return ops_hw->qat_dev_get_extra_size(); 61 } 62 63 static struct qat_pci_device * 64 qat_pci_get_named_dev(const char *name) 65 { 66 unsigned int i; 67 68 if (name == NULL) 69 return NULL; 70 71 for (i = 0; i < RTE_PMD_QAT_MAX_PCI_DEVICES; i++) { 72 if (qat_pci_devs[i].mz && 73 (strcmp(((struct qat_pci_device *) 74 qat_pci_devs[i].mz->addr)->name, name) 75 == 0)) 76 return (struct qat_pci_device *) 77 qat_pci_devs[i].mz->addr; 78 } 79 80 return NULL; 81 } 82 83 static uint8_t 84 qat_pci_find_free_device_index(void) 85 { 86 uint8_t dev_id; 87 88 for (dev_id = 0; dev_id < RTE_PMD_QAT_MAX_PCI_DEVICES; 89 dev_id++) { 90 if (qat_pci_devs[dev_id].mz == NULL) 91 break; 92 } 93 return dev_id; 94 } 95 96 struct qat_pci_device * 97 qat_get_qat_dev_from_pci_dev(struct rte_pci_device *pci_dev) 98 { 99 char name[QAT_DEV_NAME_MAX_LEN]; 100 101 rte_pci_device_name(&pci_dev->addr, name, sizeof(name)); 102 103 return qat_pci_get_named_dev(name); 104 } 105 106 static void 107 qat_dev_parse_cmd(const char *str, struct qat_dev_cmd_param 108 *qat_dev_cmd_param) 109 { 110 int i = 0; 111 const char *param; 112 113 while (1) { 114 char value_str[4] = { }; 115 116 param = qat_dev_cmd_param[i].name; 117 if (param == NULL) 118 return; 119 long value = 0; 120 const char *arg = strstr(str, param); 121 const char *arg2 = NULL; 122 123 if (arg) { 124 arg2 = arg + strlen(param); 125 if (*arg2 != '=') { 126 QAT_LOG(DEBUG, "parsing error '=' sign" 127 " should immediately follow %s", 128 param); 129 arg2 = NULL; 130 } else 131 arg2++; 132 } else { 133 QAT_LOG(DEBUG, "%s not provided", param); 134 } 135 if (arg2) { 136 int iter = 0; 137 while (iter < 2) { 138 if (!isdigit(*(arg2 + iter))) 139 break; 140 iter++; 141 } 142 if (!iter) { 143 QAT_LOG(DEBUG, "parsing error %s" 144 " no number provided", 145 param); 146 } else { 147 memcpy(value_str, arg2, iter); 148 value = strtol(value_str, NULL, 10); 149 if (value > MAX_QP_THRESHOLD_SIZE) { 150 QAT_LOG(DEBUG, "Exceeded max size of" 151 " threshold, setting to %d", 152 MAX_QP_THRESHOLD_SIZE); 153 value = MAX_QP_THRESHOLD_SIZE; 154 } 155 QAT_LOG(DEBUG, "parsing %s = %ld", 156 param, value); 157 } 158 } 159 qat_dev_cmd_param[i].val = value; 160 i++; 161 } 162 } 163 164 struct qat_pci_device * 165 qat_pci_device_allocate(struct rte_pci_device *pci_dev, 166 struct qat_dev_cmd_param *qat_dev_cmd_param) 167 { 168 struct qat_pci_device *qat_dev; 169 enum qat_device_gen qat_dev_gen; 170 uint8_t qat_dev_id = 0; 171 char name[QAT_DEV_NAME_MAX_LEN]; 172 struct rte_devargs *devargs = pci_dev->device.devargs; 173 struct qat_dev_hw_spec_funcs *ops_hw; 174 struct rte_mem_resource *mem_resource; 175 const struct rte_memzone *qat_dev_mz; 176 int qat_dev_size, extra_size; 177 178 rte_pci_device_name(&pci_dev->addr, name, sizeof(name)); 179 snprintf(name+strlen(name), QAT_DEV_NAME_MAX_LEN-strlen(name), "_qat"); 180 181 switch (pci_dev->id.device_id) { 182 case 0x0443: 183 qat_dev_gen = QAT_GEN1; 184 break; 185 case 0x37c9: 186 case 0x19e3: 187 case 0x6f55: 188 case 0x18ef: 189 qat_dev_gen = QAT_GEN2; 190 break; 191 case 0x18a1: 192 qat_dev_gen = QAT_GEN3; 193 break; 194 case 0x4941: 195 qat_dev_gen = QAT_GEN4; 196 break; 197 default: 198 QAT_LOG(ERR, "Invalid dev_id, can't determine generation"); 199 return NULL; 200 } 201 202 if (rte_eal_process_type() == RTE_PROC_SECONDARY) { 203 const struct rte_memzone *mz = rte_memzone_lookup(name); 204 205 if (mz == NULL) { 206 QAT_LOG(ERR, 207 "Secondary can't find %s mz, did primary create device?", 208 name); 209 return NULL; 210 } 211 qat_dev = mz->addr; 212 qat_pci_devs[qat_dev->qat_dev_id].mz = mz; 213 qat_pci_devs[qat_dev->qat_dev_id].pci_dev = pci_dev; 214 qat_nb_pci_devices++; 215 QAT_LOG(DEBUG, "QAT device %d found, name %s, total QATs %d", 216 qat_dev->qat_dev_id, qat_dev->name, qat_nb_pci_devices); 217 return qat_dev; 218 } 219 220 if (qat_pci_get_named_dev(name) != NULL) { 221 QAT_LOG(ERR, "QAT device with name %s already allocated!", 222 name); 223 return NULL; 224 } 225 226 qat_dev_id = qat_pci_find_free_device_index(); 227 if (qat_dev_id == RTE_PMD_QAT_MAX_PCI_DEVICES) { 228 QAT_LOG(ERR, "Reached maximum number of QAT devices"); 229 return NULL; 230 } 231 232 extra_size = qat_pci_get_extra_size(qat_dev_gen); 233 if (extra_size < 0) { 234 QAT_LOG(ERR, "QAT internal error: no pci pointer for gen %d", 235 qat_dev_gen); 236 return NULL; 237 } 238 239 qat_dev_size = sizeof(struct qat_pci_device) + extra_size; 240 qat_dev_mz = rte_memzone_reserve(name, qat_dev_size, 241 rte_socket_id(), 0); 242 243 if (qat_dev_mz == NULL) { 244 QAT_LOG(ERR, "Error when allocating memzone for QAT_%d", 245 qat_dev_id); 246 return NULL; 247 } 248 249 qat_dev = qat_dev_mz->addr; 250 memset(qat_dev, 0, qat_dev_size); 251 qat_dev->dev_private = qat_dev + 1; 252 strlcpy(qat_dev->name, name, QAT_DEV_NAME_MAX_LEN); 253 qat_dev->qat_dev_id = qat_dev_id; 254 qat_pci_devs[qat_dev_id].pci_dev = pci_dev; 255 qat_dev->qat_dev_gen = qat_dev_gen; 256 257 ops_hw = qat_dev_hw_spec[qat_dev->qat_dev_gen]; 258 if (ops_hw->qat_dev_get_misc_bar == NULL) { 259 QAT_LOG(ERR, "qat_dev_get_misc_bar function pointer not set"); 260 rte_memzone_free(qat_dev_mz); 261 return NULL; 262 } 263 if (ops_hw->qat_dev_get_misc_bar(&mem_resource, pci_dev) == 0) { 264 if (mem_resource->addr == NULL) { 265 QAT_LOG(ERR, "QAT cannot get access to VF misc bar"); 266 rte_memzone_free(qat_dev_mz); 267 return NULL; 268 } 269 qat_dev->misc_bar_io_addr = mem_resource->addr; 270 } else 271 qat_dev->misc_bar_io_addr = NULL; 272 273 if (devargs && devargs->drv_str) 274 qat_dev_parse_cmd(devargs->drv_str, qat_dev_cmd_param); 275 276 if (qat_read_qp_config(qat_dev)) { 277 QAT_LOG(ERR, 278 "Cannot acquire ring configuration for QAT_%d", 279 qat_dev_id); 280 rte_memzone_free(qat_dev_mz); 281 return NULL; 282 } 283 284 /* No errors when allocating, attach memzone with 285 * qat_dev to list of devices 286 */ 287 qat_pci_devs[qat_dev_id].mz = qat_dev_mz; 288 289 rte_spinlock_init(&qat_dev->arb_csr_lock); 290 qat_nb_pci_devices++; 291 292 QAT_LOG(DEBUG, "QAT device %d found, name %s, total QATs %d", 293 qat_dev->qat_dev_id, qat_dev->name, qat_nb_pci_devices); 294 295 return qat_dev; 296 } 297 298 static int 299 qat_pci_device_release(struct rte_pci_device *pci_dev) 300 { 301 struct qat_pci_device *qat_dev; 302 char name[QAT_DEV_NAME_MAX_LEN]; 303 int busy = 0; 304 305 if (pci_dev == NULL) 306 return -EINVAL; 307 308 rte_pci_device_name(&pci_dev->addr, name, sizeof(name)); 309 snprintf(name+strlen(name), QAT_DEV_NAME_MAX_LEN-strlen(name), "_qat"); 310 qat_dev = qat_pci_get_named_dev(name); 311 if (qat_dev != NULL) { 312 313 struct qat_device_info *inst = 314 &qat_pci_devs[qat_dev->qat_dev_id]; 315 /* Check that there are no service devs still on pci device */ 316 317 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 318 if (qat_dev->sym_dev != NULL) { 319 QAT_LOG(DEBUG, "QAT sym device %s is busy", 320 name); 321 busy = 1; 322 } 323 if (qat_dev->asym_dev != NULL) { 324 QAT_LOG(DEBUG, "QAT asym device %s is busy", 325 name); 326 busy = 1; 327 } 328 if (qat_dev->comp_dev != NULL) { 329 QAT_LOG(DEBUG, "QAT comp device %s is busy", 330 name); 331 busy = 1; 332 } 333 if (busy) 334 return -EBUSY; 335 rte_memzone_free(inst->mz); 336 } 337 memset(inst, 0, sizeof(struct qat_device_info)); 338 qat_nb_pci_devices--; 339 QAT_LOG(DEBUG, "QAT device %s released, total QATs %d", 340 name, qat_nb_pci_devices); 341 } 342 return 0; 343 } 344 345 static int 346 qat_pci_dev_destroy(struct qat_pci_device *qat_pci_dev, 347 struct rte_pci_device *pci_dev) 348 { 349 qat_sym_dev_destroy(qat_pci_dev); 350 qat_comp_dev_destroy(qat_pci_dev); 351 qat_asym_dev_destroy(qat_pci_dev); 352 return qat_pci_device_release(pci_dev); 353 } 354 355 static int qat_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 356 struct rte_pci_device *pci_dev) 357 { 358 int sym_ret = 0, asym_ret = 0, comp_ret = 0; 359 int num_pmds_created = 0; 360 struct qat_pci_device *qat_pci_dev; 361 struct qat_dev_hw_spec_funcs *ops_hw; 362 struct qat_dev_cmd_param qat_dev_cmd_param[] = { 363 { SYM_ENQ_THRESHOLD_NAME, 0 }, 364 { ASYM_ENQ_THRESHOLD_NAME, 0 }, 365 { COMP_ENQ_THRESHOLD_NAME, 0 }, 366 { NULL, 0 }, 367 }; 368 369 QAT_LOG(DEBUG, "Found QAT device at %02x:%02x.%x", 370 pci_dev->addr.bus, 371 pci_dev->addr.devid, 372 pci_dev->addr.function); 373 374 qat_pci_dev = qat_pci_device_allocate(pci_dev, qat_dev_cmd_param); 375 if (qat_pci_dev == NULL) 376 return -ENODEV; 377 378 ops_hw = qat_dev_hw_spec[qat_pci_dev->qat_dev_gen]; 379 RTE_FUNC_PTR_OR_ERR_RET(ops_hw->qat_dev_reset_ring_pairs, 380 -ENOTSUP); 381 if (ops_hw->qat_dev_reset_ring_pairs(qat_pci_dev)) { 382 QAT_LOG(ERR, 383 "Cannot reset ring pairs, does pf driver supports pf2vf comms?" 384 ); 385 return -ENODEV; 386 } 387 388 sym_ret = qat_sym_dev_create(qat_pci_dev, qat_dev_cmd_param); 389 if (sym_ret == 0) { 390 num_pmds_created++; 391 392 } 393 else 394 QAT_LOG(WARNING, 395 "Failed to create QAT SYM PMD on device %s", 396 qat_pci_dev->name); 397 398 comp_ret = qat_comp_dev_create(qat_pci_dev, qat_dev_cmd_param); 399 if (comp_ret == 0) 400 num_pmds_created++; 401 else 402 QAT_LOG(WARNING, 403 "Failed to create QAT COMP PMD on device %s", 404 qat_pci_dev->name); 405 406 asym_ret = qat_asym_dev_create(qat_pci_dev, qat_dev_cmd_param); 407 if (asym_ret == 0) 408 num_pmds_created++; 409 else 410 QAT_LOG(WARNING, 411 "Failed to create QAT ASYM PMD on device %s", 412 qat_pci_dev->name); 413 414 if (num_pmds_created == 0) 415 qat_pci_dev_destroy(qat_pci_dev, pci_dev); 416 417 return 0; 418 } 419 420 static int 421 qat_pci_remove(struct rte_pci_device *pci_dev) 422 { 423 struct qat_pci_device *qat_pci_dev; 424 425 if (pci_dev == NULL) 426 return -EINVAL; 427 428 qat_pci_dev = qat_get_qat_dev_from_pci_dev(pci_dev); 429 if (qat_pci_dev == NULL) 430 return 0; 431 432 return qat_pci_dev_destroy(qat_pci_dev, pci_dev); 433 } 434 435 static struct rte_pci_driver rte_qat_pmd = { 436 .id_table = pci_id_qat_map, 437 .drv_flags = RTE_PCI_DRV_NEED_MAPPING, 438 .probe = qat_pci_probe, 439 .remove = qat_pci_remove 440 }; 441 442 __rte_weak int 443 qat_sym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused, 444 struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused) 445 { 446 return 0; 447 } 448 449 __rte_weak int 450 qat_asym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused, 451 struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused) 452 { 453 return 0; 454 } 455 456 __rte_weak int 457 qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused) 458 { 459 return 0; 460 } 461 462 __rte_weak int 463 qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused) 464 { 465 return 0; 466 } 467 468 __rte_weak int 469 qat_comp_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused, 470 struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused) 471 { 472 return 0; 473 } 474 475 __rte_weak int 476 qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused) 477 { 478 return 0; 479 } 480 481 RTE_PMD_REGISTER_PCI(QAT_PCI_NAME, rte_qat_pmd); 482 RTE_PMD_REGISTER_PCI_TABLE(QAT_PCI_NAME, pci_id_qat_map); 483 RTE_PMD_REGISTER_KMOD_DEP(QAT_PCI_NAME, "* igb_uio | uio_pci_generic | vfio-pci"); 484