1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2015-2019 Intel Corporation 3 */ 4 5 #include <rte_malloc.h> 6 7 #include "qat_comp.h" 8 #include "qat_comp_pmd.h" 9 10 #define QAT_PMD_COMP_SGL_DEF_SEGMENTS 16 11 12 struct stream_create_info { 13 struct qat_comp_dev_private *comp_dev; 14 int socket_id; 15 int error; 16 }; 17 18 static const struct rte_compressdev_capabilities qat_comp_gen_capabilities[] = { 19 {/* COMPRESSION - deflate */ 20 .algo = RTE_COMP_ALGO_DEFLATE, 21 .comp_feature_flags = RTE_COMP_FF_MULTI_PKT_CHECKSUM | 22 RTE_COMP_FF_CRC32_CHECKSUM | 23 RTE_COMP_FF_ADLER32_CHECKSUM | 24 RTE_COMP_FF_CRC32_ADLER32_CHECKSUM | 25 RTE_COMP_FF_SHAREABLE_PRIV_XFORM | 26 RTE_COMP_FF_HUFFMAN_FIXED | 27 RTE_COMP_FF_HUFFMAN_DYNAMIC | 28 RTE_COMP_FF_OOP_SGL_IN_SGL_OUT | 29 RTE_COMP_FF_OOP_SGL_IN_LB_OUT | 30 RTE_COMP_FF_OOP_LB_IN_SGL_OUT | 31 RTE_COMP_FF_STATEFUL_DECOMPRESSION, 32 .window_size = {.min = 15, .max = 15, .increment = 0} }, 33 {RTE_COMP_ALGO_LIST_END, 0, {0, 0, 0} } }; 34 35 static void 36 qat_comp_stats_get(struct rte_compressdev *dev, 37 struct rte_compressdev_stats *stats) 38 { 39 struct qat_common_stats qat_stats = {0}; 40 struct qat_comp_dev_private *qat_priv; 41 42 if (stats == NULL || dev == NULL) { 43 QAT_LOG(ERR, "invalid ptr: stats %p, dev %p", stats, dev); 44 return; 45 } 46 qat_priv = dev->data->dev_private; 47 48 qat_stats_get(qat_priv->qat_dev, &qat_stats, QAT_SERVICE_COMPRESSION); 49 stats->enqueued_count = qat_stats.enqueued_count; 50 stats->dequeued_count = qat_stats.dequeued_count; 51 stats->enqueue_err_count = qat_stats.enqueue_err_count; 52 stats->dequeue_err_count = qat_stats.dequeue_err_count; 53 } 54 55 static void 56 qat_comp_stats_reset(struct rte_compressdev *dev) 57 { 58 struct qat_comp_dev_private *qat_priv; 59 60 if (dev == NULL) { 61 QAT_LOG(ERR, "invalid compressdev ptr %p", dev); 62 return; 63 } 64 qat_priv = dev->data->dev_private; 65 66 qat_stats_reset(qat_priv->qat_dev, QAT_SERVICE_COMPRESSION); 67 68 } 69 70 static int 71 qat_comp_qp_release(struct rte_compressdev *dev, uint16_t queue_pair_id) 72 { 73 struct qat_comp_dev_private *qat_private = dev->data->dev_private; 74 struct qat_qp **qp_addr = 75 (struct qat_qp **)&(dev->data->queue_pairs[queue_pair_id]); 76 struct qat_qp *qp = (struct qat_qp *)*qp_addr; 77 uint32_t i; 78 79 QAT_LOG(DEBUG, "Release comp qp %u on device %d", 80 queue_pair_id, dev->data->dev_id); 81 82 qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][queue_pair_id] 83 = NULL; 84 85 for (i = 0; i < qp->nb_descriptors; i++) { 86 87 struct qat_comp_op_cookie *cookie = qp->op_cookies[i]; 88 89 rte_free(cookie->qat_sgl_src_d); 90 rte_free(cookie->qat_sgl_dst_d); 91 } 92 93 return qat_qp_release((struct qat_qp **) 94 &(dev->data->queue_pairs[queue_pair_id])); 95 } 96 97 static int 98 qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id, 99 uint32_t max_inflight_ops, int socket_id) 100 { 101 struct qat_qp *qp; 102 int ret = 0; 103 uint32_t i; 104 struct qat_qp_config qat_qp_conf; 105 106 struct qat_qp **qp_addr = 107 (struct qat_qp **)&(dev->data->queue_pairs[qp_id]); 108 struct qat_comp_dev_private *qat_private = dev->data->dev_private; 109 const struct qat_qp_hw_data *comp_hw_qps = 110 qat_gen_config[qat_private->qat_dev->qat_dev_gen] 111 .qp_hw_data[QAT_SERVICE_COMPRESSION]; 112 const struct qat_qp_hw_data *qp_hw_data = comp_hw_qps + qp_id; 113 114 /* If qp is already in use free ring memory and qp metadata. */ 115 if (*qp_addr != NULL) { 116 ret = qat_comp_qp_release(dev, qp_id); 117 if (ret < 0) 118 return ret; 119 } 120 if (qp_id >= qat_qps_per_service(comp_hw_qps, 121 QAT_SERVICE_COMPRESSION)) { 122 QAT_LOG(ERR, "qp_id %u invalid for this device", qp_id); 123 return -EINVAL; 124 } 125 126 qat_qp_conf.hw = qp_hw_data; 127 qat_qp_conf.cookie_size = sizeof(struct qat_comp_op_cookie); 128 qat_qp_conf.nb_descriptors = max_inflight_ops; 129 qat_qp_conf.socket_id = socket_id; 130 qat_qp_conf.service_str = "comp"; 131 132 ret = qat_qp_setup(qat_private->qat_dev, qp_addr, qp_id, &qat_qp_conf); 133 if (ret != 0) 134 return ret; 135 136 /* store a link to the qp in the qat_pci_device */ 137 qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][qp_id] 138 = *qp_addr; 139 140 qp = (struct qat_qp *)*qp_addr; 141 qp->min_enq_burst_threshold = qat_private->min_enq_burst_threshold; 142 143 for (i = 0; i < qp->nb_descriptors; i++) { 144 145 struct qat_comp_op_cookie *cookie = 146 qp->op_cookies[i]; 147 148 cookie->qp = qp; 149 cookie->cookie_index = i; 150 151 cookie->qat_sgl_src_d = rte_zmalloc_socket(NULL, 152 sizeof(struct qat_sgl) + 153 sizeof(struct qat_flat_buf) * 154 QAT_PMD_COMP_SGL_DEF_SEGMENTS, 155 64, dev->data->socket_id); 156 157 cookie->qat_sgl_dst_d = rte_zmalloc_socket(NULL, 158 sizeof(struct qat_sgl) + 159 sizeof(struct qat_flat_buf) * 160 QAT_PMD_COMP_SGL_DEF_SEGMENTS, 161 64, dev->data->socket_id); 162 163 if (cookie->qat_sgl_src_d == NULL || 164 cookie->qat_sgl_dst_d == NULL) { 165 QAT_LOG(ERR, "Can't allocate SGL" 166 " for device %s", 167 qat_private->qat_dev->name); 168 return -ENOMEM; 169 } 170 171 cookie->qat_sgl_src_phys_addr = 172 rte_malloc_virt2iova(cookie->qat_sgl_src_d); 173 174 cookie->qat_sgl_dst_phys_addr = 175 rte_malloc_virt2iova(cookie->qat_sgl_dst_d); 176 177 cookie->dst_nb_elems = cookie->src_nb_elems = 178 QAT_PMD_COMP_SGL_DEF_SEGMENTS; 179 180 cookie->socket_id = dev->data->socket_id; 181 182 cookie->error = 0; 183 } 184 185 return ret; 186 } 187 188 189 #define QAT_IM_BUFFER_DEBUG 0 190 static const struct rte_memzone * 191 qat_comp_setup_inter_buffers(struct qat_comp_dev_private *comp_dev, 192 uint32_t buff_size) 193 { 194 char inter_buff_mz_name[RTE_MEMZONE_NAMESIZE]; 195 const struct rte_memzone *memzone; 196 uint8_t *mz_start = NULL; 197 rte_iova_t mz_start_phys = 0; 198 struct array_of_ptrs *array_of_pointers; 199 int size_of_ptr_array; 200 uint32_t full_size; 201 uint32_t offset_of_sgls, offset_of_flat_buffs = 0; 202 int i; 203 int num_im_sgls = qat_gen_config[ 204 comp_dev->qat_dev->qat_dev_gen].comp_num_im_bufs_required; 205 206 QAT_LOG(DEBUG, "QAT COMP device %s needs %d sgls", 207 comp_dev->qat_dev->name, num_im_sgls); 208 snprintf(inter_buff_mz_name, RTE_MEMZONE_NAMESIZE, 209 "%s_inter_buff", comp_dev->qat_dev->name); 210 memzone = rte_memzone_lookup(inter_buff_mz_name); 211 if (memzone != NULL) { 212 QAT_LOG(DEBUG, "QAT COMP im buffer memzone created already"); 213 return memzone; 214 } 215 216 /* Create a memzone to hold intermediate buffers and associated 217 * meta-data needed by the firmware. The memzone contains 3 parts: 218 * - a list of num_im_sgls physical pointers to sgls 219 * - the num_im_sgl sgl structures, each pointing to 220 * QAT_NUM_BUFS_IN_IM_SGL flat buffers 221 * - the flat buffers: num_im_sgl * QAT_NUM_BUFS_IN_IM_SGL 222 * buffers, each of buff_size 223 * num_im_sgls depends on the hardware generation of the device 224 * buff_size comes from the user via the config file 225 */ 226 227 size_of_ptr_array = num_im_sgls * sizeof(phys_addr_t); 228 offset_of_sgls = (size_of_ptr_array + (~QAT_64_BYTE_ALIGN_MASK)) 229 & QAT_64_BYTE_ALIGN_MASK; 230 offset_of_flat_buffs = 231 offset_of_sgls + num_im_sgls * sizeof(struct qat_inter_sgl); 232 full_size = offset_of_flat_buffs + 233 num_im_sgls * buff_size * QAT_NUM_BUFS_IN_IM_SGL; 234 235 memzone = rte_memzone_reserve_aligned(inter_buff_mz_name, full_size, 236 comp_dev->compressdev->data->socket_id, 237 RTE_MEMZONE_IOVA_CONTIG, QAT_64_BYTE_ALIGN); 238 if (memzone == NULL) { 239 QAT_LOG(ERR, "Can't allocate intermediate buffers" 240 " for device %s", comp_dev->qat_dev->name); 241 return NULL; 242 } 243 244 mz_start = (uint8_t *)memzone->addr; 245 mz_start_phys = memzone->phys_addr; 246 QAT_LOG(DEBUG, "Memzone %s: addr = %p, phys = 0x%"PRIx64 247 ", size required %d, size created %zu", 248 inter_buff_mz_name, mz_start, mz_start_phys, 249 full_size, memzone->len); 250 251 array_of_pointers = (struct array_of_ptrs *)mz_start; 252 for (i = 0; i < num_im_sgls; i++) { 253 uint32_t curr_sgl_offset = 254 offset_of_sgls + i * sizeof(struct qat_inter_sgl); 255 struct qat_inter_sgl *sgl = 256 (struct qat_inter_sgl *)(mz_start + curr_sgl_offset); 257 int lb; 258 array_of_pointers->pointer[i] = mz_start_phys + curr_sgl_offset; 259 260 sgl->num_bufs = QAT_NUM_BUFS_IN_IM_SGL; 261 sgl->num_mapped_bufs = 0; 262 sgl->resrvd = 0; 263 264 #if QAT_IM_BUFFER_DEBUG 265 QAT_LOG(DEBUG, " : phys addr of sgl[%i] in array_of_pointers" 266 " = 0x%"PRIx64, i, array_of_pointers->pointer[i]); 267 QAT_LOG(DEBUG, " : virt address of sgl[%i] = %p", i, sgl); 268 #endif 269 for (lb = 0; lb < QAT_NUM_BUFS_IN_IM_SGL; lb++) { 270 sgl->buffers[lb].addr = 271 mz_start_phys + offset_of_flat_buffs + 272 (((i * QAT_NUM_BUFS_IN_IM_SGL) + lb) * buff_size); 273 sgl->buffers[lb].len = buff_size; 274 sgl->buffers[lb].resrvd = 0; 275 #if QAT_IM_BUFFER_DEBUG 276 QAT_LOG(DEBUG, 277 " : sgl->buffers[%d].addr = 0x%"PRIx64", len=%d", 278 lb, sgl->buffers[lb].addr, sgl->buffers[lb].len); 279 #endif 280 } 281 } 282 #if QAT_IM_BUFFER_DEBUG 283 QAT_DP_HEXDUMP_LOG(DEBUG, "IM buffer memzone start:", 284 mz_start, offset_of_flat_buffs + 32); 285 #endif 286 return memzone; 287 } 288 289 static struct rte_mempool * 290 qat_comp_create_xform_pool(struct qat_comp_dev_private *comp_dev, 291 struct rte_compressdev_config *config, 292 uint32_t num_elements) 293 { 294 char xform_pool_name[RTE_MEMPOOL_NAMESIZE]; 295 struct rte_mempool *mp; 296 297 snprintf(xform_pool_name, RTE_MEMPOOL_NAMESIZE, 298 "%s_xforms", comp_dev->qat_dev->name); 299 300 QAT_LOG(DEBUG, "xformpool: %s", xform_pool_name); 301 mp = rte_mempool_lookup(xform_pool_name); 302 303 if (mp != NULL) { 304 QAT_LOG(DEBUG, "xformpool already created"); 305 if (mp->size != num_elements) { 306 QAT_LOG(DEBUG, "xformpool wrong size - delete it"); 307 rte_mempool_free(mp); 308 mp = NULL; 309 comp_dev->xformpool = NULL; 310 } 311 } 312 313 if (mp == NULL) 314 mp = rte_mempool_create(xform_pool_name, 315 num_elements, 316 qat_comp_xform_size(), 0, 0, 317 NULL, NULL, NULL, NULL, config->socket_id, 318 0); 319 if (mp == NULL) { 320 QAT_LOG(ERR, "Err creating mempool %s w %d elements of size %d", 321 xform_pool_name, num_elements, qat_comp_xform_size()); 322 return NULL; 323 } 324 325 return mp; 326 } 327 328 static void 329 qat_comp_stream_init(struct rte_mempool *mp __rte_unused, void *opaque, 330 void *obj, unsigned int obj_idx) 331 { 332 struct stream_create_info *info = opaque; 333 struct qat_comp_stream *stream = obj; 334 char mz_name[RTE_MEMZONE_NAMESIZE]; 335 const struct rte_memzone *memzone; 336 struct qat_inter_sgl *ram_banks_desc; 337 338 /* find a memzone for RAM banks */ 339 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "%s_%u_rambanks", 340 info->comp_dev->qat_dev->name, obj_idx); 341 memzone = rte_memzone_lookup(mz_name); 342 if (memzone == NULL) { 343 /* allocate a memzone for compression state and RAM banks */ 344 memzone = rte_memzone_reserve_aligned(mz_name, 345 QAT_STATE_REGISTERS_MAX_SIZE 346 + sizeof(struct qat_inter_sgl) 347 + QAT_INFLATE_CONTEXT_SIZE, 348 info->socket_id, 349 RTE_MEMZONE_IOVA_CONTIG, QAT_64_BYTE_ALIGN); 350 if (memzone == NULL) { 351 QAT_LOG(ERR, 352 "Can't allocate RAM banks for device %s, object %u", 353 info->comp_dev->qat_dev->name, obj_idx); 354 info->error = -ENOMEM; 355 return; 356 } 357 } 358 359 /* prepare the buffer list descriptor for RAM banks */ 360 ram_banks_desc = (struct qat_inter_sgl *) 361 (((uint8_t *) memzone->addr) + QAT_STATE_REGISTERS_MAX_SIZE); 362 ram_banks_desc->num_bufs = 1; 363 ram_banks_desc->buffers[0].len = QAT_INFLATE_CONTEXT_SIZE; 364 ram_banks_desc->buffers[0].addr = memzone->iova 365 + QAT_STATE_REGISTERS_MAX_SIZE 366 + sizeof(struct qat_inter_sgl); 367 368 memset(stream, 0, qat_comp_stream_size()); 369 stream->memzone = memzone; 370 stream->state_registers_decomp = memzone->addr; 371 stream->state_registers_decomp_phys = memzone->iova; 372 stream->inflate_context = ((uint8_t *) memzone->addr) 373 + QAT_STATE_REGISTERS_MAX_SIZE; 374 stream->inflate_context_phys = memzone->iova 375 + QAT_STATE_REGISTERS_MAX_SIZE; 376 } 377 378 static void 379 qat_comp_stream_destroy(struct rte_mempool *mp __rte_unused, 380 void *opaque __rte_unused, void *obj, 381 unsigned obj_idx __rte_unused) 382 { 383 struct qat_comp_stream *stream = obj; 384 385 rte_memzone_free(stream->memzone); 386 } 387 388 static struct rte_mempool * 389 qat_comp_create_stream_pool(struct qat_comp_dev_private *comp_dev, 390 int socket_id, 391 uint32_t num_elements) 392 { 393 char stream_pool_name[RTE_MEMPOOL_NAMESIZE]; 394 struct rte_mempool *mp; 395 396 snprintf(stream_pool_name, RTE_MEMPOOL_NAMESIZE, 397 "%s_streams", comp_dev->qat_dev->name); 398 399 QAT_LOG(DEBUG, "streampool: %s", stream_pool_name); 400 mp = rte_mempool_lookup(stream_pool_name); 401 402 if (mp != NULL) { 403 QAT_LOG(DEBUG, "streampool already created"); 404 if (mp->size != num_elements) { 405 QAT_LOG(DEBUG, "streampool wrong size - delete it"); 406 rte_mempool_obj_iter(mp, qat_comp_stream_destroy, NULL); 407 rte_mempool_free(mp); 408 mp = NULL; 409 comp_dev->streampool = NULL; 410 } 411 } 412 413 if (mp == NULL) { 414 struct stream_create_info info = { 415 .comp_dev = comp_dev, 416 .socket_id = socket_id, 417 .error = 0 418 }; 419 mp = rte_mempool_create(stream_pool_name, 420 num_elements, 421 qat_comp_stream_size(), 0, 0, 422 NULL, NULL, qat_comp_stream_init, &info, 423 socket_id, 0); 424 if (mp == NULL) { 425 QAT_LOG(ERR, 426 "Err creating mempool %s w %d elements of size %d", 427 stream_pool_name, num_elements, 428 qat_comp_stream_size()); 429 } else if (info.error) { 430 rte_mempool_obj_iter(mp, qat_comp_stream_destroy, NULL); 431 QAT_LOG(ERR, 432 "Destoying mempool %s as at least one element failed initialisation", 433 stream_pool_name); 434 rte_mempool_free(mp); 435 mp = NULL; 436 } 437 } 438 439 return mp; 440 } 441 442 static void 443 _qat_comp_dev_config_clear(struct qat_comp_dev_private *comp_dev) 444 { 445 /* Free intermediate buffers */ 446 if (comp_dev->interm_buff_mz) { 447 rte_memzone_free(comp_dev->interm_buff_mz); 448 comp_dev->interm_buff_mz = NULL; 449 } 450 451 /* Free private_xform pool */ 452 if (comp_dev->xformpool) { 453 /* Free internal mempool for private xforms */ 454 rte_mempool_free(comp_dev->xformpool); 455 comp_dev->xformpool = NULL; 456 } 457 458 /* Free stream pool */ 459 if (comp_dev->streampool) { 460 rte_mempool_obj_iter(comp_dev->streampool, 461 qat_comp_stream_destroy, NULL); 462 rte_mempool_free(comp_dev->streampool); 463 comp_dev->streampool = NULL; 464 } 465 } 466 467 static int 468 qat_comp_dev_config(struct rte_compressdev *dev, 469 struct rte_compressdev_config *config) 470 { 471 struct qat_comp_dev_private *comp_dev = dev->data->dev_private; 472 int ret = 0; 473 474 if (RTE_PMD_QAT_COMP_IM_BUFFER_SIZE == 0) { 475 QAT_LOG(WARNING, 476 "RTE_PMD_QAT_COMP_IM_BUFFER_SIZE = 0 in config file, so" 477 " QAT device can't be used for Dynamic Deflate. " 478 "Did you really intend to do this?"); 479 } else { 480 comp_dev->interm_buff_mz = 481 qat_comp_setup_inter_buffers(comp_dev, 482 RTE_PMD_QAT_COMP_IM_BUFFER_SIZE); 483 if (comp_dev->interm_buff_mz == NULL) { 484 ret = -ENOMEM; 485 goto error_out; 486 } 487 } 488 489 if (config->max_nb_priv_xforms) { 490 comp_dev->xformpool = qat_comp_create_xform_pool(comp_dev, 491 config, config->max_nb_priv_xforms); 492 if (comp_dev->xformpool == NULL) { 493 ret = -ENOMEM; 494 goto error_out; 495 } 496 } else 497 comp_dev->xformpool = NULL; 498 499 if (config->max_nb_streams) { 500 comp_dev->streampool = qat_comp_create_stream_pool(comp_dev, 501 config->socket_id, config->max_nb_streams); 502 if (comp_dev->streampool == NULL) { 503 ret = -ENOMEM; 504 goto error_out; 505 } 506 } else 507 comp_dev->streampool = NULL; 508 509 return 0; 510 511 error_out: 512 _qat_comp_dev_config_clear(comp_dev); 513 return ret; 514 } 515 516 static int 517 qat_comp_dev_start(struct rte_compressdev *dev __rte_unused) 518 { 519 return 0; 520 } 521 522 static void 523 qat_comp_dev_stop(struct rte_compressdev *dev __rte_unused) 524 { 525 526 } 527 528 static int 529 qat_comp_dev_close(struct rte_compressdev *dev) 530 { 531 int i; 532 int ret = 0; 533 struct qat_comp_dev_private *comp_dev = dev->data->dev_private; 534 535 for (i = 0; i < dev->data->nb_queue_pairs; i++) { 536 ret = qat_comp_qp_release(dev, i); 537 if (ret < 0) 538 return ret; 539 } 540 541 _qat_comp_dev_config_clear(comp_dev); 542 543 return ret; 544 } 545 546 547 static void 548 qat_comp_dev_info_get(struct rte_compressdev *dev, 549 struct rte_compressdev_info *info) 550 { 551 struct qat_comp_dev_private *comp_dev = dev->data->dev_private; 552 const struct qat_qp_hw_data *comp_hw_qps = 553 qat_gen_config[comp_dev->qat_dev->qat_dev_gen] 554 .qp_hw_data[QAT_SERVICE_COMPRESSION]; 555 556 if (info != NULL) { 557 info->max_nb_queue_pairs = 558 qat_qps_per_service(comp_hw_qps, 559 QAT_SERVICE_COMPRESSION); 560 info->feature_flags = dev->feature_flags; 561 info->capabilities = comp_dev->qat_dev_capabilities; 562 } 563 } 564 565 static uint16_t 566 qat_comp_pmd_enq_deq_dummy_op_burst(void *qp __rte_unused, 567 struct rte_comp_op **ops __rte_unused, 568 uint16_t nb_ops __rte_unused) 569 { 570 QAT_DP_LOG(ERR, "QAT PMD detected wrong FW version !"); 571 return 0; 572 } 573 574 static struct rte_compressdev_ops compress_qat_dummy_ops = { 575 576 /* Device related operations */ 577 .dev_configure = NULL, 578 .dev_start = NULL, 579 .dev_stop = qat_comp_dev_stop, 580 .dev_close = qat_comp_dev_close, 581 .dev_infos_get = NULL, 582 583 .stats_get = NULL, 584 .stats_reset = qat_comp_stats_reset, 585 .queue_pair_setup = NULL, 586 .queue_pair_release = qat_comp_qp_release, 587 588 /* Compression related operations */ 589 .private_xform_create = NULL, 590 .private_xform_free = qat_comp_private_xform_free 591 }; 592 593 static uint16_t 594 qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops, 595 uint16_t nb_ops) 596 { 597 uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, nb_ops); 598 struct qat_qp *tmp_qp = (struct qat_qp *)qp; 599 600 if (ret) { 601 if ((*ops)->debug_status == 602 (uint64_t)ERR_CODE_QAT_COMP_WRONG_FW) { 603 tmp_qp->qat_dev->comp_dev->compressdev->enqueue_burst = 604 qat_comp_pmd_enq_deq_dummy_op_burst; 605 tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst = 606 qat_comp_pmd_enq_deq_dummy_op_burst; 607 608 tmp_qp->qat_dev->comp_dev->compressdev->dev_ops = 609 &compress_qat_dummy_ops; 610 QAT_LOG(ERR, "QAT PMD detected wrong FW version !"); 611 612 } else { 613 tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst = 614 (compressdev_dequeue_pkt_burst_t) 615 qat_dequeue_op_burst; 616 } 617 } 618 return ret; 619 } 620 621 static struct rte_compressdev_ops compress_qat_ops = { 622 623 /* Device related operations */ 624 .dev_configure = qat_comp_dev_config, 625 .dev_start = qat_comp_dev_start, 626 .dev_stop = qat_comp_dev_stop, 627 .dev_close = qat_comp_dev_close, 628 .dev_infos_get = qat_comp_dev_info_get, 629 630 .stats_get = qat_comp_stats_get, 631 .stats_reset = qat_comp_stats_reset, 632 .queue_pair_setup = qat_comp_qp_setup, 633 .queue_pair_release = qat_comp_qp_release, 634 635 /* Compression related operations */ 636 .private_xform_create = qat_comp_private_xform_create, 637 .private_xform_free = qat_comp_private_xform_free, 638 .stream_create = qat_comp_stream_create, 639 .stream_free = qat_comp_stream_free 640 }; 641 642 /* An rte_driver is needed in the registration of the device with compressdev. 643 * The actual qat pci's rte_driver can't be used as its name represents 644 * the whole pci device with all services. Think of this as a holder for a name 645 * for the compression part of the pci device. 646 */ 647 static const char qat_comp_drv_name[] = RTE_STR(COMPRESSDEV_NAME_QAT_PMD); 648 static const struct rte_driver compdev_qat_driver = { 649 .name = qat_comp_drv_name, 650 .alias = qat_comp_drv_name 651 }; 652 int 653 qat_comp_dev_create(struct qat_pci_device *qat_pci_dev, 654 struct qat_dev_cmd_param *qat_dev_cmd_param) 655 { 656 int i = 0; 657 struct qat_device_info *qat_dev_instance = 658 &qat_pci_devs[qat_pci_dev->qat_dev_id]; 659 if (qat_pci_dev->qat_dev_gen == QAT_GEN3) { 660 QAT_LOG(ERR, "Compression PMD not supported on QAT c4xxx"); 661 return 0; 662 } 663 664 struct rte_compressdev_pmd_init_params init_params = { 665 .name = "", 666 .socket_id = qat_dev_instance->pci_dev->device.numa_node, 667 }; 668 char name[RTE_COMPRESSDEV_NAME_MAX_LEN]; 669 char capa_memz_name[RTE_COMPRESSDEV_NAME_MAX_LEN]; 670 struct rte_compressdev *compressdev; 671 struct qat_comp_dev_private *comp_dev; 672 const struct rte_compressdev_capabilities *capabilities; 673 uint64_t capa_size; 674 675 snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN, "%s_%s", 676 qat_pci_dev->name, "comp"); 677 QAT_LOG(DEBUG, "Creating QAT COMP device %s", name); 678 679 /* Populate subset device to use in compressdev device creation */ 680 qat_dev_instance->comp_rte_dev.driver = &compdev_qat_driver; 681 qat_dev_instance->comp_rte_dev.numa_node = 682 qat_dev_instance->pci_dev->device.numa_node; 683 qat_dev_instance->comp_rte_dev.devargs = NULL; 684 685 compressdev = rte_compressdev_pmd_create(name, 686 &(qat_dev_instance->comp_rte_dev), 687 sizeof(struct qat_comp_dev_private), 688 &init_params); 689 690 if (compressdev == NULL) 691 return -ENODEV; 692 693 compressdev->dev_ops = &compress_qat_ops; 694 695 compressdev->enqueue_burst = (compressdev_enqueue_pkt_burst_t) 696 qat_enqueue_comp_op_burst; 697 compressdev->dequeue_burst = qat_comp_pmd_dequeue_first_op_burst; 698 699 compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED; 700 701 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 702 return 0; 703 704 snprintf(capa_memz_name, RTE_COMPRESSDEV_NAME_MAX_LEN, 705 "QAT_COMP_CAPA_GEN_%d", 706 qat_pci_dev->qat_dev_gen); 707 708 comp_dev = compressdev->data->dev_private; 709 comp_dev->qat_dev = qat_pci_dev; 710 comp_dev->compressdev = compressdev; 711 712 switch (qat_pci_dev->qat_dev_gen) { 713 case QAT_GEN1: 714 case QAT_GEN2: 715 case QAT_GEN3: 716 capabilities = qat_comp_gen_capabilities; 717 capa_size = sizeof(qat_comp_gen_capabilities); 718 break; 719 default: 720 capabilities = qat_comp_gen_capabilities; 721 capa_size = sizeof(qat_comp_gen_capabilities); 722 QAT_LOG(DEBUG, 723 "QAT gen %d capabilities unknown, default to GEN1", 724 qat_pci_dev->qat_dev_gen); 725 break; 726 } 727 728 comp_dev->capa_mz = rte_memzone_lookup(capa_memz_name); 729 if (comp_dev->capa_mz == NULL) { 730 comp_dev->capa_mz = rte_memzone_reserve(capa_memz_name, 731 capa_size, 732 rte_socket_id(), 0); 733 } 734 if (comp_dev->capa_mz == NULL) { 735 QAT_LOG(DEBUG, 736 "Error allocating memzone for capabilities, destroying PMD for %s", 737 name); 738 memset(&qat_dev_instance->comp_rte_dev, 0, 739 sizeof(qat_dev_instance->comp_rte_dev)); 740 rte_compressdev_pmd_destroy(compressdev); 741 return -EFAULT; 742 } 743 744 memcpy(comp_dev->capa_mz->addr, capabilities, capa_size); 745 comp_dev->qat_dev_capabilities = comp_dev->capa_mz->addr; 746 747 while (1) { 748 if (qat_dev_cmd_param[i].name == NULL) 749 break; 750 if (!strcmp(qat_dev_cmd_param[i].name, COMP_ENQ_THRESHOLD_NAME)) 751 comp_dev->min_enq_burst_threshold = 752 qat_dev_cmd_param[i].val; 753 i++; 754 } 755 qat_pci_dev->comp_dev = comp_dev; 756 757 QAT_LOG(DEBUG, 758 "Created QAT COMP device %s as compressdev instance %d", 759 name, compressdev->data->dev_id); 760 return 0; 761 } 762 763 int 764 qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev) 765 { 766 struct qat_comp_dev_private *comp_dev; 767 768 if (qat_pci_dev == NULL) 769 return -ENODEV; 770 771 comp_dev = qat_pci_dev->comp_dev; 772 if (comp_dev == NULL) 773 return 0; 774 775 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 776 rte_memzone_free(qat_pci_dev->comp_dev->capa_mz); 777 778 /* clean up any resources used by the device */ 779 qat_comp_dev_close(comp_dev->compressdev); 780 781 rte_compressdev_pmd_destroy(comp_dev->compressdev); 782 qat_pci_dev->comp_dev = NULL; 783 784 return 0; 785 } 786