/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2019 Intel Corporation
 */

#include <rte_malloc.h>

#include "qat_comp.h"
#include "qat_comp_pmd.h"

#define QAT_PMD_COMP_SGL_DEF_SEGMENTS	16

struct qat_comp_gen_dev_ops qat_comp_gen_dev_ops[QAT_N_GENS];

struct stream_create_info {
	struct qat_comp_dev_private *comp_dev;
	int socket_id;
	int error;
};

static struct
qat_comp_capabilities_info qat_comp_get_capa_info(
		enum qat_device_gen qat_dev_gen, struct qat_pci_device *qat_dev)
{
	struct qat_comp_capabilities_info ret = { .data = NULL, .size = 0 };

	if (qat_dev_gen >= QAT_N_GENS)
		return ret;
	RTE_FUNC_PTR_OR_ERR_RET(qat_comp_gen_dev_ops[qat_dev_gen]
			.qat_comp_get_capabilities, ret);
	return qat_comp_gen_dev_ops[qat_dev_gen]
			.qat_comp_get_capabilities(qat_dev);
}

void
qat_comp_stats_get(struct rte_compressdev *dev,
		struct rte_compressdev_stats *stats)
{
	struct qat_common_stats qat_stats = {0};
	struct qat_comp_dev_private *qat_priv;

	if (stats == NULL || dev == NULL) {
		QAT_LOG(ERR, "invalid ptr: stats %p, dev %p", stats, dev);
		return;
	}
	qat_priv = dev->data->dev_private;

	qat_stats_get(qat_priv->qat_dev, &qat_stats, QAT_SERVICE_COMPRESSION);
	stats->enqueued_count = qat_stats.enqueued_count;
	stats->dequeued_count = qat_stats.dequeued_count;
	stats->enqueue_err_count = qat_stats.enqueue_err_count;
	stats->dequeue_err_count = qat_stats.dequeue_err_count;
}

void
qat_comp_stats_reset(struct rte_compressdev *dev)
{
	struct qat_comp_dev_private *qat_priv;

	if (dev == NULL) {
		QAT_LOG(ERR, "invalid compressdev ptr %p", dev);
		return;
	}
	qat_priv = dev->data->dev_private;

	qat_stats_reset(qat_priv->qat_dev, QAT_SERVICE_COMPRESSION);
}

int
qat_comp_qp_release(struct rte_compressdev *dev, uint16_t queue_pair_id)
{
	struct qat_comp_dev_private *qat_private = dev->data->dev_private;
	struct qat_qp **qp_addr =
		(struct qat_qp **)&(dev->data->queue_pairs[queue_pair_id]);
	struct qat_qp *qp = (struct qat_qp *)*qp_addr;
	enum qat_device_gen qat_dev_gen = qat_private->qat_dev->qat_dev_gen;
	uint32_t i;

	QAT_LOG(DEBUG, "Release comp qp %u on device %d",
				queue_pair_id, dev->data->dev_id);

	qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][queue_pair_id]
						= NULL;

	if (qp != NULL)
		for (i = 0; i < qp->nb_descriptors; i++) {
			struct qat_comp_op_cookie *cookie = qp->op_cookies[i];

			rte_free(cookie->qat_sgl_src_d);
			rte_free(cookie->qat_sgl_dst_d);
		}

	return qat_qp_release(qat_dev_gen, (struct qat_qp **)
			&(dev->data->queue_pairs[queue_pair_id]));
}
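
/*
 * Usage sketch, not part of the driver: queue pairs on this PMD are normally
 * created through the generic compressdev API, which lands in
 * qat_comp_qp_setup() below. Device id and sizes here are illustrative only.
 *
 *	uint8_t dev_id = 0;			// assumed compressdev id
 *	uint32_t max_inflight_ops = 512;	// becomes nb_descriptors below
 *
 *	if (rte_compressdev_queue_pair_setup(dev_id, 0, max_inflight_ops,
 *			rte_socket_id()) < 0)
 *		rte_exit(EXIT_FAILURE, "qp setup failed");
 */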

int
qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
		uint32_t max_inflight_ops, int socket_id)
{
	struct qat_qp_config qat_qp_conf = {0};
	struct qat_qp **qp_addr =
			(struct qat_qp **)&(dev->data->queue_pairs[qp_id]);
	struct qat_comp_dev_private *qat_private = dev->data->dev_private;
	struct qat_pci_device *qat_dev = qat_private->qat_dev;
	struct qat_qp *qp;
	uint32_t i;
	int ret;

	/* If qp is already in use free ring memory and qp metadata. */
	if (*qp_addr != NULL) {
		ret = qat_comp_qp_release(dev, qp_id);
		if (ret < 0)
			return ret;
	}
	if (qp_id >= qat_qps_per_service(qat_dev,
				QAT_SERVICE_COMPRESSION)) {
		QAT_LOG(ERR, "qp_id %u invalid for this device", qp_id);
		return -EINVAL;
	}

	qat_qp_conf.hw = qat_qp_get_hw_data(qat_dev, QAT_SERVICE_COMPRESSION,
			qp_id);
	if (qat_qp_conf.hw == NULL) {
		QAT_LOG(ERR, "qp_id %u invalid for this device", qp_id);
		return -EINVAL;
	}
	qat_qp_conf.cookie_size = sizeof(struct qat_comp_op_cookie);
	qat_qp_conf.nb_descriptors = max_inflight_ops;
	qat_qp_conf.socket_id = socket_id;
	qat_qp_conf.service_str = "comp";

	ret = qat_qp_setup(qat_private->qat_dev, qp_addr, qp_id, &qat_qp_conf);
	if (ret != 0)
		return ret;

	/* store a link to the qp in the qat_pci_device */
	qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][qp_id]
							= *qp_addr;

	qp = (struct qat_qp *)*qp_addr;
	qp->min_enq_burst_threshold = qat_private->min_enq_burst_threshold;

	for (i = 0; i < qp->nb_descriptors; i++) {

		struct qat_comp_op_cookie *cookie =
				qp->op_cookies[i];

		cookie->qp = qp;
		cookie->cookie_index = i;

		cookie->qat_sgl_src_d = rte_zmalloc_socket(NULL,
					sizeof(struct qat_sgl) +
					sizeof(struct qat_flat_buf) *
					QAT_PMD_COMP_SGL_DEF_SEGMENTS,
					64, dev->data->socket_id);

		cookie->qat_sgl_dst_d = rte_zmalloc_socket(NULL,
					sizeof(struct qat_sgl) +
					sizeof(struct qat_flat_buf) *
					QAT_PMD_COMP_SGL_DEF_SEGMENTS,
					64, dev->data->socket_id);

		if (cookie->qat_sgl_src_d == NULL ||
				cookie->qat_sgl_dst_d == NULL) {
			QAT_LOG(ERR, "Can't allocate SGL for device %s",
					qat_private->qat_dev->name);
			return -ENOMEM;
		}

		cookie->qat_sgl_src_phys_addr =
				rte_malloc_virt2iova(cookie->qat_sgl_src_d);

		cookie->qat_sgl_dst_phys_addr =
				rte_malloc_virt2iova(cookie->qat_sgl_dst_d);

		cookie->dst_nb_elems = cookie->src_nb_elems =
				QAT_PMD_COMP_SGL_DEF_SEGMENTS;

		cookie->socket_id = dev->data->socket_id;

		cookie->error = 0;
	}

	return ret;
}
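
/*
 * Sizing note (derived from the allocations above, illustrative only): every
 * op cookie carries two pre-built SGL descriptors, one for the source chain
 * and one for the destination chain, each of
 *
 *	sizeof(struct qat_sgl) +
 *		QAT_PMD_COMP_SGL_DEF_SEGMENTS * sizeof(struct qat_flat_buf)
 *
 * bytes, so chains of up to QAT_PMD_COMP_SGL_DEF_SEGMENTS (16) segments fit
 * the pre-allocated descriptors without any further allocation on this path.
 */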

#define QAT_IM_BUFFER_DEBUG 0
const struct rte_memzone *
qat_comp_setup_inter_buffers(struct qat_comp_dev_private *comp_dev,
		uint32_t buff_size)
{
	char inter_buff_mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *memzone;
	uint8_t *mz_start = NULL;
	rte_iova_t mz_start_phys = 0;
	struct array_of_ptrs *array_of_pointers;
	int size_of_ptr_array;
	uint32_t full_size;
	uint32_t offset_of_flat_buffs;
	int i;
	int num_im_sgls = qat_comp_get_num_im_bufs_required(
			comp_dev->qat_dev->qat_dev_gen);

	QAT_LOG(DEBUG, "QAT COMP device %s needs %d sgls",
				comp_dev->qat_dev->name, num_im_sgls);
	snprintf(inter_buff_mz_name, RTE_MEMZONE_NAMESIZE,
			"%s_inter_buff", comp_dev->qat_dev->name);
	memzone = rte_memzone_lookup(inter_buff_mz_name);
	if (memzone != NULL) {
		QAT_LOG(DEBUG, "QAT COMP im buffer memzone created already");
		return memzone;
	}

	/* Create multiple memzones to hold intermediate buffers and associated
	 * meta-data needed by the firmware.
	 * The first memzone contains:
	 * - a list of num_im_sgls physical pointers to sgls
	 * All other memzones contain:
	 * - the sgl structure, pointing to QAT_NUM_BUFS_IN_IM_SGL flat buffers
	 * - the flat buffers: QAT_NUM_BUFS_IN_IM_SGL buffers,
	 *   each of buff_size
	 * num_im_sgls depends on the hardware generation of the device
	 * buff_size comes from the user via the config file
	 */

	size_of_ptr_array = num_im_sgls * sizeof(phys_addr_t);
	offset_of_flat_buffs = sizeof(struct qat_inter_sgl);
	full_size = offset_of_flat_buffs +
			buff_size * QAT_NUM_BUFS_IN_IM_SGL;

	memzone = rte_memzone_reserve_aligned(inter_buff_mz_name,
			size_of_ptr_array,
			comp_dev->compressdev->data->socket_id,
			RTE_MEMZONE_IOVA_CONTIG, QAT_64_BYTE_ALIGN);
	if (memzone == NULL) {
		QAT_LOG(ERR,
				"Can't allocate intermediate buffers for device %s",
				comp_dev->qat_dev->name);
		return NULL;
	}

	mz_start = (uint8_t *)memzone->addr;
	mz_start_phys = memzone->iova;
	QAT_LOG(DEBUG, "Memzone %s: addr = %p, phys = 0x%"PRIx64
			", size required %d, size created %zu",
			inter_buff_mz_name, mz_start, mz_start_phys,
			size_of_ptr_array, memzone->len);

	array_of_pointers = (struct array_of_ptrs *)mz_start;
	for (i = 0; i < num_im_sgls; i++) {
		const struct rte_memzone *mz;
		struct qat_inter_sgl *sgl;
		int lb;

		snprintf(inter_buff_mz_name, RTE_MEMZONE_NAMESIZE,
				"%s_inter_buff_%d", comp_dev->qat_dev->name, i);
		mz = rte_memzone_lookup(inter_buff_mz_name);
		if (mz == NULL) {
			mz = rte_memzone_reserve_aligned(inter_buff_mz_name,
					full_size,
					comp_dev->compressdev->data->socket_id,
					RTE_MEMZONE_IOVA_CONTIG,
					QAT_64_BYTE_ALIGN);
			if (mz == NULL) {
				QAT_LOG(ERR,
					"Can't allocate intermediate buffers for device %s",
					comp_dev->qat_dev->name);
				while (--i >= 0) {
					snprintf(inter_buff_mz_name,
							RTE_MEMZONE_NAMESIZE,
							"%s_inter_buff_%d",
							comp_dev->qat_dev->name,
							i);
					rte_memzone_free(
						rte_memzone_lookup(
							inter_buff_mz_name));
				}
				rte_memzone_free(memzone);
				return NULL;
			}
		}

		QAT_LOG(DEBUG, "Memzone %s: addr = %p, phys = 0x%"PRIx64
				", size required %d, size created %zu",
				inter_buff_mz_name, mz->addr, mz->iova,
				full_size, mz->len);

		array_of_pointers->pointer[i] = mz->iova;

		sgl = (struct qat_inter_sgl *) mz->addr;
		sgl->num_bufs = QAT_NUM_BUFS_IN_IM_SGL;
		sgl->num_mapped_bufs = 0;
		sgl->resrvd = 0;

#if QAT_IM_BUFFER_DEBUG
		QAT_LOG(DEBUG, " : phys addr of sgl[%i] in array_of_pointers"
				" = 0x%"PRIx64, i, array_of_pointers->pointer[i]);
		QAT_LOG(DEBUG, " : virt address of sgl[%i] = %p", i, sgl);
#endif
		for (lb = 0; lb < QAT_NUM_BUFS_IN_IM_SGL; lb++) {
			sgl->buffers[lb].addr =
					mz->iova + offset_of_flat_buffs +
					lb * buff_size;
			sgl->buffers[lb].len = buff_size;
			sgl->buffers[lb].resrvd = 0;
#if QAT_IM_BUFFER_DEBUG
			QAT_LOG(DEBUG,
				" : sgl->buffers[%d].addr = 0x%"PRIx64", len=%d",
				lb, sgl->buffers[lb].addr, sgl->buffers[lb].len);
#endif
		}
	}
#if QAT_IM_BUFFER_DEBUG
	QAT_DP_HEXDUMP_LOG(DEBUG, "IM buffer memzone start:",
			memzone->addr, size_of_ptr_array);
#endif
	return memzone;
}
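
/*
 * Layout example for the memzones reserved above (figures illustrative; the
 * real buff_size comes from the config file and QAT_NUM_BUFS_IN_IM_SGL from
 * the generation headers). Assuming buff_size = 64 KB and two flat buffers
 * per SGL:
 *
 *	"<dev>_inter_buff"     : num_im_sgls * sizeof(phys_addr_t) IOVA pointers
 *	"<dev>_inter_buff_<i>" : [struct qat_inter_sgl][64 KB buf 0][64 KB buf 1]
 *
 * i.e. full_size = sizeof(struct qat_inter_sgl) + 2 * 64 KB, with flat buffer
 * lb starting at offset_of_flat_buffs + lb * buff_size within its memzone.
 */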

static struct rte_mempool *
qat_comp_create_xform_pool(struct qat_comp_dev_private *comp_dev,
		struct rte_compressdev_config *config,
		uint32_t num_elements)
{
	char xform_pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *mp;

	snprintf(xform_pool_name, RTE_MEMPOOL_NAMESIZE,
			"%s_xforms", comp_dev->qat_dev->name);

	QAT_LOG(DEBUG, "xformpool: %s", xform_pool_name);
	mp = rte_mempool_lookup(xform_pool_name);

	if (mp != NULL) {
		QAT_LOG(DEBUG, "xformpool already created");
		if (mp->size != num_elements) {
			QAT_LOG(DEBUG, "xformpool wrong size - delete it");
			rte_mempool_free(mp);
			mp = NULL;
			comp_dev->xformpool = NULL;
		}
	}

	if (mp == NULL)
		mp = rte_mempool_create(xform_pool_name,
				num_elements,
				qat_comp_xform_size(), 0, 0,
				NULL, NULL, NULL, NULL, config->socket_id,
				0);
	if (mp == NULL) {
		QAT_LOG(ERR, "Err creating mempool %s w %d elements of size %d",
			xform_pool_name, num_elements, qat_comp_xform_size());
		return NULL;
	}

	return mp;
}

static void
qat_comp_stream_init(struct rte_mempool *mp __rte_unused, void *opaque,
		void *obj, unsigned int obj_idx)
{
	struct stream_create_info *info = opaque;
	struct qat_comp_stream *stream = obj;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *memzone;
	struct qat_inter_sgl *ram_banks_desc;

	/* find a memzone for RAM banks */
	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "%s_%u_rambanks",
			info->comp_dev->qat_dev->name, obj_idx);
	memzone = rte_memzone_lookup(mz_name);
	if (memzone == NULL) {
		/* allocate a memzone for compression state and RAM banks */
		memzone = rte_memzone_reserve_aligned(mz_name,
			QAT_STATE_REGISTERS_MAX_SIZE
				+ sizeof(struct qat_inter_sgl)
				+ QAT_INFLATE_CONTEXT_SIZE,
			info->socket_id,
			RTE_MEMZONE_IOVA_CONTIG, QAT_64_BYTE_ALIGN);
		if (memzone == NULL) {
			QAT_LOG(ERR,
				"Can't allocate RAM banks for device %s, object %u",
				info->comp_dev->qat_dev->name, obj_idx);
			info->error = -ENOMEM;
			return;
		}
	}

	/* prepare the buffer list descriptor for RAM banks */
	ram_banks_desc = (struct qat_inter_sgl *)
		(((uint8_t *) memzone->addr) + QAT_STATE_REGISTERS_MAX_SIZE);
	ram_banks_desc->num_bufs = 1;
	ram_banks_desc->buffers[0].len = QAT_INFLATE_CONTEXT_SIZE;
	ram_banks_desc->buffers[0].addr = memzone->iova
			+ QAT_STATE_REGISTERS_MAX_SIZE
			+ sizeof(struct qat_inter_sgl);

	memset(stream, 0, qat_comp_stream_size());
	stream->memzone = memzone;
	stream->state_registers_decomp = memzone->addr;
	stream->state_registers_decomp_phys = memzone->iova;
	stream->inflate_context = ((uint8_t *) memzone->addr)
			+ QAT_STATE_REGISTERS_MAX_SIZE;
	stream->inflate_context_phys = memzone->iova
			+ QAT_STATE_REGISTERS_MAX_SIZE;
}

static void
qat_comp_stream_destroy(struct rte_mempool *mp __rte_unused,
		void *opaque __rte_unused, void *obj,
		unsigned int obj_idx __rte_unused)
{
	struct qat_comp_stream *stream = obj;

	rte_memzone_free(stream->memzone);
}
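
/*
 * Per-stream memzone layout prepared in qat_comp_stream_init() above
 * (offsets as used in that code; the actual sizes come from the firmware
 * headers):
 *
 *	offset 0                            : decompression state registers
 *	offset QAT_STATE_REGISTERS_MAX_SIZE : RAM-banks descriptor (qat_inter_sgl)
 *	after the descriptor                : QAT_INFLATE_CONTEXT_SIZE of RAM banks
 */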

static struct rte_mempool *
qat_comp_create_stream_pool(struct qat_comp_dev_private *comp_dev,
		int socket_id,
		uint32_t num_elements)
{
	char stream_pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *mp;

	snprintf(stream_pool_name, RTE_MEMPOOL_NAMESIZE,
			"%s_streams", comp_dev->qat_dev->name);

	QAT_LOG(DEBUG, "streampool: %s", stream_pool_name);
	mp = rte_mempool_lookup(stream_pool_name);

	if (mp != NULL) {
		QAT_LOG(DEBUG, "streampool already created");
		if (mp->size != num_elements) {
			QAT_LOG(DEBUG, "streampool wrong size - delete it");
			rte_mempool_obj_iter(mp, qat_comp_stream_destroy, NULL);
			rte_mempool_free(mp);
			mp = NULL;
			comp_dev->streampool = NULL;
		}
	}

	if (mp == NULL) {
		struct stream_create_info info = {
			.comp_dev = comp_dev,
			.socket_id = socket_id,
			.error = 0
		};
		mp = rte_mempool_create(stream_pool_name,
				num_elements,
				qat_comp_stream_size(), 0, 0,
				NULL, NULL, qat_comp_stream_init, &info,
				socket_id, 0);
		if (mp == NULL) {
			QAT_LOG(ERR,
				"Err creating mempool %s w %d elements of size %d",
				stream_pool_name, num_elements,
				qat_comp_stream_size());
		} else if (info.error) {
			rte_mempool_obj_iter(mp, qat_comp_stream_destroy, NULL);
			QAT_LOG(ERR,
				"Destroying mempool %s as at least one element failed initialisation",
				stream_pool_name);
			rte_mempool_free(mp);
			mp = NULL;
		}
	}

	return mp;
}

static void
_qat_comp_dev_config_clear(struct qat_comp_dev_private *comp_dev)
{
	/* Free intermediate buffers */
	if (comp_dev->interm_buff_mz) {
		char mz_name[RTE_MEMZONE_NAMESIZE];
		int i = qat_comp_get_num_im_bufs_required(
				comp_dev->qat_dev->qat_dev_gen);

		while (--i >= 0) {
			snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
					"%s_inter_buff_%d",
					comp_dev->qat_dev->name, i);
			rte_memzone_free(rte_memzone_lookup(mz_name));
		}
		rte_memzone_free(comp_dev->interm_buff_mz);
		comp_dev->interm_buff_mz = NULL;
	}

	/* Free private_xform pool */
	if (comp_dev->xformpool) {
		/* Free internal mempool for private xforms */
		rte_mempool_free(comp_dev->xformpool);
		comp_dev->xformpool = NULL;
	}

	/* Free stream pool */
	if (comp_dev->streampool) {
		rte_mempool_obj_iter(comp_dev->streampool,
				qat_comp_stream_destroy, NULL);
		rte_mempool_free(comp_dev->streampool);
		comp_dev->streampool = NULL;
	}
}
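
/*
 * Application-side sketch (values illustrative): the two pools managed in
 * qat_comp_dev_config() below are sized directly by the configure call, and
 * passing 0 for either count simply skips creating that pool.
 *
 *	uint8_t dev_id = 0;			// assumed compressdev id
 *	struct rte_compressdev_config cfg = {
 *		.socket_id = rte_socket_id(),
 *		.nb_queue_pairs = 2,
 *		.max_nb_priv_xforms = 512,	// sizes the xform mempool
 *		.max_nb_streams = 64,		// sizes the stream mempool
 *	};
 *
 *	if (rte_compressdev_configure(dev_id, &cfg) < 0)
 *		rte_exit(EXIT_FAILURE, "configure failed");
 */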

int
qat_comp_dev_config(struct rte_compressdev *dev,
		struct rte_compressdev_config *config)
{
	struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
	int ret = 0;

	if (config->max_nb_priv_xforms) {
		comp_dev->xformpool = qat_comp_create_xform_pool(comp_dev,
					config, config->max_nb_priv_xforms);
		if (comp_dev->xformpool == NULL) {
			ret = -ENOMEM;
			goto error_out;
		}
	} else
		comp_dev->xformpool = NULL;

	if (config->max_nb_streams) {
		comp_dev->streampool = qat_comp_create_stream_pool(comp_dev,
				config->socket_id, config->max_nb_streams);
		if (comp_dev->streampool == NULL) {
			ret = -ENOMEM;
			goto error_out;
		}
	} else
		comp_dev->streampool = NULL;

	return 0;

error_out:
	_qat_comp_dev_config_clear(comp_dev);
	return ret;
}

int
qat_comp_dev_start(struct rte_compressdev *dev __rte_unused)
{
	return 0;
}

void
qat_comp_dev_stop(struct rte_compressdev *dev __rte_unused)
{

}

int
qat_comp_dev_close(struct rte_compressdev *dev)
{
	int i;
	int ret = 0;
	struct qat_comp_dev_private *comp_dev = dev->data->dev_private;

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = qat_comp_qp_release(dev, i);
		if (ret < 0)
			return ret;
	}

	_qat_comp_dev_config_clear(comp_dev);

	return ret;
}

void
qat_comp_dev_info_get(struct rte_compressdev *dev,
			struct rte_compressdev_info *info)
{
	struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
	struct qat_pci_device *qat_dev = comp_dev->qat_dev;

	if (info != NULL) {
		info->max_nb_queue_pairs =
			qat_qps_per_service(qat_dev,
					QAT_SERVICE_COMPRESSION);
		info->feature_flags = dev->feature_flags;
		info->capabilities = comp_dev->qat_dev_capabilities;
	}
}

static uint16_t
qat_comp_pmd_enq_deq_dummy_op_burst(void *qp __rte_unused,
		struct rte_comp_op **ops __rte_unused,
		uint16_t nb_ops __rte_unused)
{
	QAT_DP_LOG(ERR, "QAT PMD detected wrong FW version !");
	return 0;
}

static struct rte_compressdev_ops compress_qat_dummy_ops = {

	/* Device related operations */
	.dev_configure		= NULL,
	.dev_start		= NULL,
	.dev_stop		= qat_comp_dev_stop,
	.dev_close		= qat_comp_dev_close,
	.dev_infos_get		= NULL,

	.stats_get		= NULL,
	.stats_reset		= qat_comp_stats_reset,
	.queue_pair_setup	= NULL,
	.queue_pair_release	= qat_comp_qp_release,

	/* Compression related operations */
	.private_xform_create	= NULL,
	.private_xform_free	= qat_comp_private_xform_free
};
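
/*
 * The first dequeue on a newly created device doubles as a firmware check:
 * if the very first completed op reports ERR_CODE_QAT_COMP_WRONG_FW, the
 * enqueue/dequeue handlers and dev_ops are swapped for the dummy versions
 * above so later compression calls fail fast; otherwise the dequeue handler
 * is switched to the regular qat_dequeue_op_burst() and this wrapper never
 * runs again.
 */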
static uint16_t
qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
		uint16_t nb_ops)
{
	uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
	struct qat_qp *tmp_qp = (struct qat_qp *)qp;

	if (ret) {
		if ((*ops)->debug_status ==
				(uint64_t)ERR_CODE_QAT_COMP_WRONG_FW) {
			tmp_qp->qat_dev->comp_dev->compressdev->enqueue_burst =
					qat_comp_pmd_enq_deq_dummy_op_burst;
			tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
					qat_comp_pmd_enq_deq_dummy_op_burst;

			tmp_qp->qat_dev->comp_dev->compressdev->dev_ops =
					&compress_qat_dummy_ops;
			QAT_LOG(ERR,
				"This QAT hardware doesn't support compression operation");

		} else {
			tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
					(compressdev_dequeue_pkt_burst_t)
					qat_dequeue_op_burst;
		}
	}
	return ret;
}

/* An rte_driver is needed in the registration of the device with compressdev.
 * The QAT PCI device's own rte_driver can't be used, as its name represents
 * the whole PCI device with all services. Think of this as a holder for a
 * name for the compression part of the PCI device.
 */
static const char qat_comp_drv_name[] = RTE_STR(COMPRESSDEV_NAME_QAT_PMD);
static const struct rte_driver compdev_qat_driver = {
	.name = qat_comp_drv_name,
	.alias = qat_comp_drv_name
};
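
/*
 * Note (creation path, hedged): qat_comp_dev_create() below is expected to be
 * called once per QAT PCI device from the common QAT probe code, after the
 * qat_pci_devs[] slot for the device has been populated. qat_dev_cmd_param is
 * the list of parsed devargs, terminated by an entry with a NULL name, that
 * the loop at the end of the function scans for COMP_ENQ_THRESHOLD_NAME.
 */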
int
qat_comp_dev_create(struct qat_pci_device *qat_pci_dev,
		struct qat_dev_cmd_param *qat_dev_cmd_param)
{
	int i = 0;
	struct qat_device_info *qat_dev_instance =
			&qat_pci_devs[qat_pci_dev->qat_dev_id];
	struct rte_compressdev_pmd_init_params init_params = {
		.name = "",
		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
	};
	char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
	char capa_memz_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
	struct rte_compressdev *compressdev;
	struct qat_comp_dev_private *comp_dev;
	struct qat_comp_capabilities_info capabilities_info;
	const struct rte_compressdev_capabilities *capabilities;
	const struct qat_comp_gen_dev_ops *qat_comp_gen_ops =
			&qat_comp_gen_dev_ops[qat_pci_dev->qat_dev_gen];
	uint64_t capa_size;

	snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN, "%s_%s",
			qat_pci_dev->name, "comp");
	QAT_LOG(DEBUG, "Creating QAT COMP device %s", name);

	if (qat_comp_gen_ops->compressdev_ops == NULL) {
		QAT_LOG(DEBUG, "Device %s does not support compression", name);
		return -ENOTSUP;
	}

	/* Populate subset device to use in compressdev device creation */
	qat_dev_instance->comp_rte_dev.driver = &compdev_qat_driver;
	qat_dev_instance->comp_rte_dev.numa_node =
			qat_dev_instance->pci_dev->device.numa_node;
	qat_dev_instance->comp_rte_dev.devargs = NULL;

	compressdev = rte_compressdev_pmd_create(name,
			&(qat_dev_instance->comp_rte_dev),
			sizeof(struct qat_comp_dev_private),
			&init_params);

	if (compressdev == NULL)
		return -ENODEV;

	compressdev->dev_ops = qat_comp_gen_ops->compressdev_ops;

	compressdev->enqueue_burst = (compressdev_enqueue_pkt_burst_t)
			qat_enqueue_comp_op_burst;
	compressdev->dequeue_burst = qat_comp_pmd_dequeue_first_op_burst;
	compressdev->feature_flags =
			qat_comp_gen_ops->qat_comp_get_feature_flags();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	snprintf(capa_memz_name, RTE_COMPRESSDEV_NAME_MAX_LEN,
			"QAT_COMP_CAPA_GEN_%d",
			qat_pci_dev->qat_dev_gen);

	comp_dev = compressdev->data->dev_private;
	comp_dev->qat_dev = qat_pci_dev;
	comp_dev->compressdev = compressdev;

	capabilities_info = qat_comp_get_capa_info(qat_pci_dev->qat_dev_gen,
			qat_pci_dev);

	if (capabilities_info.data == NULL) {
		QAT_LOG(DEBUG,
			"QAT gen %d capabilities unknown, default to GEN1",
			qat_pci_dev->qat_dev_gen);
		capabilities_info = qat_comp_get_capa_info(QAT_GEN1,
				qat_pci_dev);
	}

	capabilities = capabilities_info.data;
	capa_size = capabilities_info.size;

	comp_dev->capa_mz = rte_memzone_lookup(capa_memz_name);
	if (comp_dev->capa_mz == NULL) {
		comp_dev->capa_mz = rte_memzone_reserve(capa_memz_name,
				capa_size, rte_socket_id(), 0);
	}
	if (comp_dev->capa_mz == NULL) {
		QAT_LOG(DEBUG,
			"Error allocating memzone for capabilities, destroying PMD for %s",
			name);
		memset(&qat_dev_instance->comp_rte_dev, 0,
				sizeof(qat_dev_instance->comp_rte_dev));
		rte_compressdev_pmd_destroy(compressdev);
		return -EFAULT;
	}

	memcpy(comp_dev->capa_mz->addr, capabilities, capa_size);
	comp_dev->qat_dev_capabilities = comp_dev->capa_mz->addr;

	while (1) {
		if (qat_dev_cmd_param[i].name == NULL)
			break;
		if (!strcmp(qat_dev_cmd_param[i].name, COMP_ENQ_THRESHOLD_NAME))
			comp_dev->min_enq_burst_threshold =
					qat_dev_cmd_param[i].val;
		i++;
	}
	qat_pci_dev->comp_dev = comp_dev;

	QAT_LOG(DEBUG,
		"Created QAT COMP device %s as compressdev instance %d",
			name, compressdev->data->dev_id);
	return 0;
}

int
qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev)
{
	struct qat_comp_dev_private *comp_dev;

	if (qat_pci_dev == NULL)
		return -ENODEV;

	comp_dev = qat_pci_dev->comp_dev;
	if (comp_dev == NULL)
		return 0;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_memzone_free(qat_pci_dev->comp_dev->capa_mz);

	/* clean up any resources used by the device */
	qat_comp_dev_close(comp_dev->compressdev);

	rte_compressdev_pmd_destroy(comp_dev->compressdev);
	qat_pci_dev->comp_dev = NULL;

	return 0;
}
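
/*
 * Devargs sketch (hedged): COMP_ENQ_THRESHOLD_NAME is defined in the common
 * QAT device code; assuming the documented key "qat_comp_enq_threshold", the
 * threshold read in qat_comp_dev_create() would be supplied on the EAL
 * command line roughly as
 *
 *	-a <pci_bdf>,qat_comp_enq_threshold=32
 *
 * and ends up in qp->min_enq_burst_threshold via qat_comp_qp_setup().
 */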