/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2018 Intel Corporation
 */

#include "qat_comp.h"
#include "qat_comp_pmd.h"

static const struct rte_compressdev_capabilities qat_comp_gen_capabilities[] = {
	{/* COMPRESSION - deflate */
	 .algo = RTE_COMP_ALGO_DEFLATE,
	 .comp_feature_flags = RTE_COMP_FF_MULTI_PKT_CHECKSUM |
				RTE_COMP_FF_CRC32_CHECKSUM |
				RTE_COMP_FF_ADLER32_CHECKSUM |
				RTE_COMP_FF_CRC32_ADLER32_CHECKSUM |
				RTE_COMP_FF_SHAREABLE_PRIV_XFORM |
				RTE_COMP_FF_HUFFMAN_FIXED |
				RTE_COMP_FF_HUFFMAN_DYNAMIC |
				RTE_COMP_FF_OOP_SGL_IN_SGL_OUT |
				RTE_COMP_FF_OOP_SGL_IN_LB_OUT |
				RTE_COMP_FF_OOP_LB_IN_SGL_OUT,
	 .window_size = {.min = 15, .max = 15, .increment = 0} },
	{RTE_COMP_ALGO_LIST_END, 0, {0, 0, 0} } };

static void
qat_comp_stats_get(struct rte_compressdev *dev,
		struct rte_compressdev_stats *stats)
{
	struct qat_common_stats qat_stats = {0};
	struct qat_comp_dev_private *qat_priv;

	if (stats == NULL || dev == NULL) {
		QAT_LOG(ERR, "invalid ptr: stats %p, dev %p", stats, dev);
		return;
	}
	qat_priv = dev->data->dev_private;

	qat_stats_get(qat_priv->qat_dev, &qat_stats, QAT_SERVICE_COMPRESSION);
	stats->enqueued_count = qat_stats.enqueued_count;
	stats->dequeued_count = qat_stats.dequeued_count;
	stats->enqueue_err_count = qat_stats.enqueue_err_count;
	stats->dequeue_err_count = qat_stats.dequeue_err_count;
}

static void
qat_comp_stats_reset(struct rte_compressdev *dev)
{
	struct qat_comp_dev_private *qat_priv;

	if (dev == NULL) {
		QAT_LOG(ERR, "invalid compressdev ptr %p", dev);
		return;
	}
	qat_priv = dev->data->dev_private;

	qat_stats_reset(qat_priv->qat_dev, QAT_SERVICE_COMPRESSION);
}

static int
qat_comp_qp_release(struct rte_compressdev *dev, uint16_t queue_pair_id)
{
	struct qat_comp_dev_private *qat_private = dev->data->dev_private;

	QAT_LOG(DEBUG, "Release comp qp %u on device %d",
				queue_pair_id, dev->data->dev_id);

	qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][queue_pair_id]
						= NULL;

	return qat_qp_release((struct qat_qp **)
			&(dev->data->queue_pairs[queue_pair_id]));
}

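/* qat_comp_qp_setup() below is the .queue_pair_setup callback, reached
 * through the public compressdev API. A minimal calling-side sketch,
 * illustrative only (dev_id, socket and sizes are assumptions):
 *
 *	struct rte_compressdev_config cfg = {
 *		.socket_id = 0,
 *		.nb_queue_pairs = 1,
 *		.max_nb_priv_xforms = 16,
 *		.max_nb_streams = 0,	(QAT is stateless-only, see dev_config)
 *	};
 *	rte_compressdev_configure(dev_id, &cfg);
 *	rte_compressdev_queue_pair_setup(dev_id, 0, 512, 0);
 *
 * The third argument (max_inflight_ops) becomes nb_descriptors on the
 * underlying hardware ring.
 */
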
static int
qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
		uint32_t max_inflight_ops, int socket_id)
{
	struct qat_qp *qp;
	int ret = 0;
	uint32_t i;
	struct qat_qp_config qat_qp_conf;

	struct qat_qp **qp_addr =
			(struct qat_qp **)&(dev->data->queue_pairs[qp_id]);
	struct qat_comp_dev_private *qat_private = dev->data->dev_private;
	const struct qat_qp_hw_data *comp_hw_qps =
			qat_gen_config[qat_private->qat_dev->qat_dev_gen]
				.qp_hw_data[QAT_SERVICE_COMPRESSION];
	const struct qat_qp_hw_data *qp_hw_data = comp_hw_qps + qp_id;

	/* If qp is already in use, free ring memory and qp metadata. */
	if (*qp_addr != NULL) {
		ret = qat_comp_qp_release(dev, qp_id);
		if (ret < 0)
			return ret;
	}
	if (qp_id >= qat_qps_per_service(comp_hw_qps,
					QAT_SERVICE_COMPRESSION)) {
		QAT_LOG(ERR, "qp_id %u invalid for this device", qp_id);
		return -EINVAL;
	}

	qat_qp_conf.hw = qp_hw_data;
	qat_qp_conf.build_request = qat_comp_build_request;
	qat_qp_conf.cookie_size = sizeof(struct qat_comp_op_cookie);
	qat_qp_conf.nb_descriptors = max_inflight_ops;
	qat_qp_conf.socket_id = socket_id;
	qat_qp_conf.service_str = "comp";

	ret = qat_qp_setup(qat_private->qat_dev, qp_addr, qp_id, &qat_qp_conf);
	if (ret != 0)
		return ret;

	/* store a link to the qp in the qat_pci_device */
	qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][qp_id]
							= *qp_addr;

	qp = (struct qat_qp *)*qp_addr;

	/* Pre-compute the IOVA of each cookie's src/dst sgl once at setup
	 * time so the data path never has to translate addresses.
	 */
	for (i = 0; i < qp->nb_descriptors; i++) {
		struct qat_comp_op_cookie *cookie = qp->op_cookies[i];

		cookie->qat_sgl_src_phys_addr =
				rte_mempool_virt2iova(cookie) +
				offsetof(struct qat_comp_op_cookie,
						qat_sgl_src);

		cookie->qat_sgl_dst_phys_addr =
				rte_mempool_virt2iova(cookie) +
				offsetof(struct qat_comp_op_cookie,
						qat_sgl_dst);
	}

	return ret;
}

#define QAT_IM_BUFFER_DEBUG 0
static const struct rte_memzone *
qat_comp_setup_inter_buffers(struct qat_comp_dev_private *comp_dev,
		uint32_t buff_size)
{
	char inter_buff_mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *memzone;
	uint8_t *mz_start = NULL;
	rte_iova_t mz_start_phys = 0;
	struct array_of_ptrs *array_of_pointers;
	int size_of_ptr_array;
	uint32_t full_size;
	uint32_t offset_of_sgls, offset_of_flat_buffs = 0;
	int i;
	int num_im_sgls = qat_gen_config[
		comp_dev->qat_dev->qat_dev_gen].comp_num_im_bufs_required;

	QAT_LOG(DEBUG, "QAT COMP device %s needs %d sgls",
				comp_dev->qat_dev->name, num_im_sgls);
	snprintf(inter_buff_mz_name, RTE_MEMZONE_NAMESIZE,
				"%s_inter_buff", comp_dev->qat_dev->name);
	memzone = rte_memzone_lookup(inter_buff_mz_name);
	if (memzone != NULL) {
		QAT_LOG(DEBUG, "QAT COMP im buffer memzone created already");
		return memzone;
	}

	/* Create a memzone to hold intermediate buffers and associated
	 * meta-data needed by the firmware. The memzone contains 3 parts:
	 * - a list of num_im_sgls physical pointers to sgls
	 * - the num_im_sgls sgl structures, each pointing to
	 *   QAT_NUM_BUFS_IN_IM_SGL flat buffers
	 * - the flat buffers: num_im_sgls * QAT_NUM_BUFS_IN_IM_SGL
	 *   buffers, each of buff_size
	 * num_im_sgls depends on the hardware generation of the device,
	 * buff_size comes from the user via the config file.
	 */

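	/* Worked example with illustrative numbers (not taken from any
	 * particular generation): with num_im_sgls = 12 and 8-byte
	 * phys_addr_t, size_of_ptr_array = 96, which rounds up to
	 * offset_of_sgls = 128 at the next 64-byte boundary. The 12 sgl
	 * structures start there, the flat buffers follow at
	 * offset_of_flat_buffs = 128 + 12 * sizeof(struct qat_inter_sgl),
	 * and full_size adds 12 * QAT_NUM_BUFS_IN_IM_SGL * buff_size bytes
	 * of buffer space.
	 */
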
	size_of_ptr_array = num_im_sgls * sizeof(phys_addr_t);
	offset_of_sgls = (size_of_ptr_array + (~QAT_64_BYTE_ALIGN_MASK))
			& QAT_64_BYTE_ALIGN_MASK;
	offset_of_flat_buffs =
		offset_of_sgls + num_im_sgls * sizeof(struct qat_inter_sgl);
	full_size = offset_of_flat_buffs +
			num_im_sgls * buff_size * QAT_NUM_BUFS_IN_IM_SGL;

	memzone = rte_memzone_reserve_aligned(inter_buff_mz_name, full_size,
			comp_dev->compressdev->data->socket_id,
			RTE_MEMZONE_2MB, QAT_64_BYTE_ALIGN);
	if (memzone == NULL) {
		QAT_LOG(ERR, "Can't allocate intermediate buffers"
				" for device %s", comp_dev->qat_dev->name);
		return NULL;
	}

	mz_start = (uint8_t *)memzone->addr;
	mz_start_phys = memzone->phys_addr;
	QAT_LOG(DEBUG, "Memzone %s: addr = %p, phys = 0x%"PRIx64
			", size required %d, size created %zu",
			inter_buff_mz_name, mz_start, mz_start_phys,
			full_size, memzone->len);

	array_of_pointers = (struct array_of_ptrs *)mz_start;
	for (i = 0; i < num_im_sgls; i++) {
		uint32_t curr_sgl_offset =
			offset_of_sgls + i * sizeof(struct qat_inter_sgl);
		struct qat_inter_sgl *sgl =
			(struct qat_inter_sgl *)(mz_start + curr_sgl_offset);
		int lb;

		array_of_pointers->pointer[i] = mz_start_phys + curr_sgl_offset;

		sgl->num_bufs = QAT_NUM_BUFS_IN_IM_SGL;
		sgl->num_mapped_bufs = 0;
		sgl->resrvd = 0;

#if QAT_IM_BUFFER_DEBUG
		QAT_LOG(DEBUG, " : phys addr of sgl[%i] in array_of_pointers"
			" = 0x%"PRIx64, i, array_of_pointers->pointer[i]);
		QAT_LOG(DEBUG, " : virt address of sgl[%i] = %p", i, sgl);
#endif
		for (lb = 0; lb < QAT_NUM_BUFS_IN_IM_SGL; lb++) {
			sgl->buffers[lb].addr =
				mz_start_phys + offset_of_flat_buffs +
				(((i * QAT_NUM_BUFS_IN_IM_SGL) + lb)
						* buff_size);
			sgl->buffers[lb].len = buff_size;
			sgl->buffers[lb].resrvd = 0;
#if QAT_IM_BUFFER_DEBUG
			QAT_LOG(DEBUG,
				" : sgl->buffers[%d].addr = 0x%"PRIx64", len=%d",
				lb, sgl->buffers[lb].addr,
				sgl->buffers[lb].len);
#endif
		}
	}
#if QAT_IM_BUFFER_DEBUG
	QAT_DP_HEXDUMP_LOG(DEBUG, "IM buffer memzone start:",
			mz_start, offset_of_flat_buffs + 32);
#endif
	return memzone;
}

static struct rte_mempool *
qat_comp_create_xform_pool(struct qat_comp_dev_private *comp_dev,
		uint32_t num_elements)
{
	char xform_pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *mp;

	snprintf(xform_pool_name, RTE_MEMPOOL_NAMESIZE,
			"%s_xforms", comp_dev->qat_dev->name);

	QAT_LOG(DEBUG, "xformpool: %s", xform_pool_name);
	mp = rte_mempool_lookup(xform_pool_name);

	if (mp != NULL) {
		QAT_LOG(DEBUG, "xformpool already created");
		if (mp->size != num_elements) {
			QAT_LOG(DEBUG, "xformpool wrong size - delete it");
			rte_mempool_free(mp);
			mp = NULL;
			comp_dev->xformpool = NULL;
		}
	}

	if (mp == NULL)
		mp = rte_mempool_create(xform_pool_name,
				num_elements,
				qat_comp_xform_size(), 0, 0,
				NULL, NULL, NULL, NULL, rte_socket_id(),
				0);
	if (mp == NULL) {
		QAT_LOG(ERR, "Error creating mempool %s with %d elements of size %d",
				xform_pool_name, num_elements,
				qat_comp_xform_size());
		return NULL;
	}

	return mp;
}

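/* Because RTE_COMP_FF_SHAREABLE_PRIV_XFORM is advertised in the
 * capabilities above, a single private_xform allocated from this pool can
 * be attached to many operations in flight at once. The pool therefore
 * only needs to be sized (via config->max_nb_priv_xforms) for the number
 * of distinct transforms, not for the number of in-flight ops.
 */
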
static void
_qat_comp_dev_config_clear(struct qat_comp_dev_private *comp_dev)
{
	/* Free intermediate buffers */
	if (comp_dev->interm_buff_mz) {
		rte_memzone_free(comp_dev->interm_buff_mz);
		comp_dev->interm_buff_mz = NULL;
	}

	/* Free internal mempool for private xforms */
	if (comp_dev->xformpool) {
		rte_mempool_free(comp_dev->xformpool);
		comp_dev->xformpool = NULL;
	}
}

static int
qat_comp_dev_config(struct rte_compressdev *dev,
		struct rte_compressdev_config *config)
{
	struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
	int ret = 0;

	if (config->max_nb_streams != 0) {
		QAT_LOG(ERR,
			"QAT device does not support STATEFUL so max_nb_streams must be 0");
		return -EINVAL;
	}

	if (RTE_PMD_QAT_COMP_IM_BUFFER_SIZE == 0) {
		QAT_LOG(WARNING,
			"RTE_PMD_QAT_COMP_IM_BUFFER_SIZE = 0 in config file, so"
			" QAT device can't be used for Dynamic Deflate. "
			"Did you really intend to do this?");
	} else {
		comp_dev->interm_buff_mz =
				qat_comp_setup_inter_buffers(comp_dev,
					RTE_PMD_QAT_COMP_IM_BUFFER_SIZE);
		if (comp_dev->interm_buff_mz == NULL) {
			ret = -ENOMEM;
			goto error_out;
		}
	}

	comp_dev->xformpool = qat_comp_create_xform_pool(comp_dev,
					config->max_nb_priv_xforms);
	if (comp_dev->xformpool == NULL) {
		ret = -ENOMEM;
		goto error_out;
	}
	return 0;

error_out:
	_qat_comp_dev_config_clear(comp_dev);
	return ret;
}

static int
qat_comp_dev_start(struct rte_compressdev *dev __rte_unused)
{
	return 0;
}

static void
qat_comp_dev_stop(struct rte_compressdev *dev __rte_unused)
{
}

static int
qat_comp_dev_close(struct rte_compressdev *dev)
{
	int i;
	int ret = 0;
	struct qat_comp_dev_private *comp_dev = dev->data->dev_private;

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = qat_comp_qp_release(dev, i);
		if (ret < 0)
			return ret;
	}

	_qat_comp_dev_config_clear(comp_dev);

	return ret;
}

static void
qat_comp_dev_info_get(struct rte_compressdev *dev,
		struct rte_compressdev_info *info)
{
	struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
	const struct qat_qp_hw_data *comp_hw_qps =
		qat_gen_config[comp_dev->qat_dev->qat_dev_gen]
			.qp_hw_data[QAT_SERVICE_COMPRESSION];

	if (info != NULL) {
		info->max_nb_queue_pairs =
			qat_qps_per_service(comp_hw_qps,
					QAT_SERVICE_COMPRESSION);
		info->feature_flags = dev->feature_flags;
		info->capabilities = comp_dev->qat_dev_capabilities;
	}
}

static uint16_t
qat_comp_pmd_enqueue_op_burst(void *qp, struct rte_comp_op **ops,
		uint16_t nb_ops)
{
	return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
}

static uint16_t
qat_comp_pmd_dequeue_op_burst(void *qp, struct rte_comp_op **ops,
		uint16_t nb_ops)
{
	return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
}

static uint16_t
qat_comp_pmd_enq_deq_dummy_op_burst(void *qp __rte_unused,
		struct rte_comp_op **ops __rte_unused,
		uint16_t nb_ops __rte_unused)
{
	QAT_DP_LOG(ERR, "QAT PMD detected wrong FW version!");
	return 0;
}

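/* Fallback op table, installed at runtime by
 * qat_comp_pmd_dequeue_frst_op_burst() below if the loaded firmware turns
 * out not to support compression: every subsequent enqueue/dequeue then
 * fails fast through the dummy burst function above instead of touching
 * the device.
 */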
static struct rte_compressdev_ops compress_qat_dummy_ops = {

	/* Device related operations */
	.dev_configure		= NULL,
	.dev_start		= NULL,
	.dev_stop		= qat_comp_dev_stop,
	.dev_close		= qat_comp_dev_close,
	.dev_infos_get		= NULL,

	.stats_get		= NULL,
	.stats_reset		= qat_comp_stats_reset,
	.queue_pair_setup	= NULL,
	.queue_pair_release	= qat_comp_qp_release,

	/* Compression related operations */
	.private_xform_create	= NULL,
	.private_xform_free	= qat_comp_private_xform_free
};

/* First-dequeue wrapper: verifies the firmware accepted the op. On
 * ERR_CODE_QAT_COMP_WRONG_FW it installs the dummy ops above; otherwise
 * it switches the device to the normal dequeue path.
 */
static uint16_t
qat_comp_pmd_dequeue_frst_op_burst(void *qp, struct rte_comp_op **ops,
		uint16_t nb_ops)
{
	uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
	struct qat_qp *tmp_qp = (struct qat_qp *)qp;

	if (ret) {
		if ((*ops)->debug_status ==
				(uint64_t)ERR_CODE_QAT_COMP_WRONG_FW) {
			tmp_qp->qat_dev->comp_dev->compressdev->enqueue_burst =
					qat_comp_pmd_enq_deq_dummy_op_burst;
			tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
					qat_comp_pmd_enq_deq_dummy_op_burst;

			tmp_qp->qat_dev->comp_dev->compressdev->dev_ops =
					&compress_qat_dummy_ops;
			QAT_LOG(ERR, "QAT PMD detected wrong FW version!");

		} else {
			tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
					qat_comp_pmd_dequeue_op_burst;
		}
	}
	return ret;
}

static struct rte_compressdev_ops compress_qat_ops = {

	/* Device related operations */
	.dev_configure		= qat_comp_dev_config,
	.dev_start		= qat_comp_dev_start,
	.dev_stop		= qat_comp_dev_stop,
	.dev_close		= qat_comp_dev_close,
	.dev_infos_get		= qat_comp_dev_info_get,

	.stats_get		= qat_comp_stats_get,
	.stats_reset		= qat_comp_stats_reset,
	.queue_pair_setup	= qat_comp_qp_setup,
	.queue_pair_release	= qat_comp_qp_release,

	/* Compression related operations */
	.private_xform_create	= qat_comp_private_xform_create,
	.private_xform_free	= qat_comp_private_xform_free
};

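/* With compress_qat_ops registered, the expected application-side flow,
 * after device configuration and queue-pair setup (see the sketch above
 * qat_comp_qp_setup), is roughly the following. Illustrative only; error
 * handling is omitted and dev_id, xform and ops are assumptions:
 *
 *	void *priv_xform;
 *
 *	rte_compressdev_start(dev_id);
 *	rte_compressdev_private_xform_create(dev_id, &xform, &priv_xform);
 *	// attach priv_xform to each op, then:
 *	nb_enq = rte_compressdev_enqueue_burst(dev_id, 0, ops, nb_ops);
 *	nb_deq = rte_compressdev_dequeue_burst(dev_id, 0, ops, nb_ops);
 */
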
/* An rte_driver is needed in the registration of the device with compressdev.
 * The actual qat pci device's rte_driver can't be used as its name represents
 * the whole pci device with all services. Think of this as a holder for a
 * name for the compression part of the pci device.
 */
static const char qat_comp_drv_name[] = RTE_STR(COMPRESSDEV_NAME_QAT_PMD);
static const struct rte_driver compdev_qat_driver = {
	.name = qat_comp_drv_name,
	.alias = qat_comp_drv_name
};

int
qat_comp_dev_create(struct qat_pci_device *qat_pci_dev)
{
	if (qat_pci_dev->qat_dev_gen == QAT_GEN3) {
		QAT_LOG(ERR, "Compression PMD not supported on QAT c4xxx");
		return 0;
	}

	struct rte_compressdev_pmd_init_params init_params = {
		.name = "",
		.socket_id = qat_pci_dev->pci_dev->device.numa_node,
	};
	char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
	struct rte_compressdev *compressdev;
	struct qat_comp_dev_private *comp_dev;

	snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN, "%s_%s",
			qat_pci_dev->name, "comp");
	QAT_LOG(DEBUG, "Creating QAT COMP device %s", name);

	/* Populate subset device to use in compressdev device creation */
	qat_pci_dev->comp_rte_dev.driver = &compdev_qat_driver;
	qat_pci_dev->comp_rte_dev.numa_node =
			qat_pci_dev->pci_dev->device.numa_node;
	qat_pci_dev->comp_rte_dev.devargs = NULL;

	compressdev = rte_compressdev_pmd_create(name,
			&(qat_pci_dev->comp_rte_dev),
			sizeof(struct qat_comp_dev_private),
			&init_params);

	if (compressdev == NULL)
		return -ENODEV;

	compressdev->dev_ops = &compress_qat_ops;

	compressdev->enqueue_burst = qat_comp_pmd_enqueue_op_burst;
	compressdev->dequeue_burst = qat_comp_pmd_dequeue_frst_op_burst;

	compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;

	comp_dev = compressdev->data->dev_private;
	comp_dev->qat_dev = qat_pci_dev;
	comp_dev->compressdev = compressdev;
	qat_pci_dev->comp_dev = comp_dev;

	switch (qat_pci_dev->qat_dev_gen) {
	case QAT_GEN1:
	case QAT_GEN2:
	case QAT_GEN3:
		comp_dev->qat_dev_capabilities = qat_comp_gen_capabilities;
		break;
	default:
		comp_dev->qat_dev_capabilities = qat_comp_gen_capabilities;
		QAT_LOG(DEBUG,
			"QAT gen %d capabilities unknown, default to GEN1",
			qat_pci_dev->qat_dev_gen);
		break;
	}

	QAT_LOG(DEBUG,
		"Created QAT COMP device %s as compressdev instance %d",
		name, compressdev->data->dev_id);
	return 0;
}

int
qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev)
{
	struct qat_comp_dev_private *comp_dev;

	if (qat_pci_dev == NULL)
		return -ENODEV;

	comp_dev = qat_pci_dev->comp_dev;
	if (comp_dev == NULL)
		return 0;

	/* clean up any resources used by the device */
	qat_comp_dev_close(comp_dev->compressdev);

	rte_compressdev_pmd_destroy(comp_dev->compressdev);
	qat_pci_dev->comp_dev = NULL;

	return 0;
}
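
/* qat_comp_dev_create() and qat_comp_dev_destroy() are intended to be
 * driven from the common QAT PCI probe/remove path. A sketch of the
 * calling side (an assumption about the surrounding code, not part of
 * this file):
 *
 *	if (qat_comp_dev_create(qat_pci_dev) != 0)
 *		goto error;
 *	...
 *	qat_comp_dev_destroy(qat_pci_dev);
 */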