/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2018 Intel Corporation
 */

#include <rte_common.h>
#include <rte_dev.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_atomic.h>
#include <rte_prefetch.h>

#include "qat_logs.h"
#include "qat_device.h"
#include "qat_qp.h"
#include "qat_sym.h"
#include "qat_asym.h"
#include "qat_comp.h"
#include "adf_transport_access_macros.h"


#define ADF_MAX_DESC				4096
#define ADF_MIN_DESC				128

#define ADF_ARB_REG_SLOT			0x1000
#define ADF_ARB_RINGSRVARBEN_OFFSET		0x19C

#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
	ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
	(ADF_ARB_REG_SLOT * index), value)

__extension__
const struct qat_qp_hw_data qat_gen1_qps[QAT_MAX_SERVICES]
					 [ADF_MAX_QPS_ON_ANY_SERVICE] = {
	/* queue pairs which provide an asymmetric crypto service */
	[QAT_SERVICE_ASYMMETRIC] = {
		{
			.service_type = QAT_SERVICE_ASYMMETRIC,
			.hw_bundle_num = 0,
			.tx_ring_num = 0,
			.rx_ring_num = 8,
			.tx_msg_size = 64,
			.rx_msg_size = 32,

		}, {
			.service_type = QAT_SERVICE_ASYMMETRIC,
			.hw_bundle_num = 0,
			.tx_ring_num = 1,
			.rx_ring_num = 9,
			.tx_msg_size = 64,
			.rx_msg_size = 32,
		}
	},
	/* queue pairs which provide a symmetric crypto service */
	[QAT_SERVICE_SYMMETRIC] = {
		{
			.service_type = QAT_SERVICE_SYMMETRIC,
			.hw_bundle_num = 0,
			.tx_ring_num = 2,
			.rx_ring_num = 10,
			.tx_msg_size = 128,
			.rx_msg_size = 32,
		},
		{
			.service_type = QAT_SERVICE_SYMMETRIC,
			.hw_bundle_num = 0,
			.tx_ring_num = 3,
			.rx_ring_num = 11,
			.tx_msg_size = 128,
			.rx_msg_size = 32,
		}
	},
	/* queue pairs which provide a compression service */
	[QAT_SERVICE_COMPRESSION] = {
		{
			.service_type = QAT_SERVICE_COMPRESSION,
			.hw_bundle_num = 0,
			.tx_ring_num = 6,
			.rx_ring_num = 14,
			.tx_msg_size = 128,
			.rx_msg_size = 32,
		}, {
			.service_type = QAT_SERVICE_COMPRESSION,
			.hw_bundle_num = 0,
			.tx_ring_num = 7,
			.rx_ring_num = 15,
			.tx_msg_size = 128,
			.rx_msg_size = 32,
		}
	}
};

__extension__
const struct qat_qp_hw_data qat_gen3_qps[QAT_MAX_SERVICES]
					 [ADF_MAX_QPS_ON_ANY_SERVICE] = {
	/* queue pairs which provide an asymmetric crypto service */
	[QAT_SERVICE_ASYMMETRIC] = {
		{
			.service_type = QAT_SERVICE_ASYMMETRIC,
			.hw_bundle_num = 0,
			.tx_ring_num = 0,
			.rx_ring_num = 4,
			.tx_msg_size = 64,
			.rx_msg_size = 32,
		}
	},
	/* queue pairs which provide a symmetric crypto service */
	[QAT_SERVICE_SYMMETRIC] = {
		{
			.service_type = QAT_SERVICE_SYMMETRIC,
			.hw_bundle_num = 0,
			.tx_ring_num = 1,
			.rx_ring_num = 5,
			.tx_msg_size = 128,
			.rx_msg_size = 32,
		}
	},
	/* queue pairs which provide a compression service */
	[QAT_SERVICE_COMPRESSION] = {
		{
			.service_type = QAT_SERVICE_COMPRESSION,
			.hw_bundle_num = 0,
			.tx_ring_num = 3,
			.rx_ring_num = 7,
			.tx_msg_size = 128,
			.rx_msg_size = 32,
		}
	}
};

static int qat_qp_check_queue_alignment(uint64_t phys_addr,
	uint32_t queue_size_bytes);
static void qat_queue_delete(struct qat_queue *queue);
static int qat_queue_create(struct qat_pci_device *qat_dev,
	struct qat_queue *queue, struct qat_qp_config *, uint8_t dir);
static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
	uint32_t *queue_size_for_csr);
static void adf_configure_queues(struct qat_qp *queue);
static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr,
	rte_spinlock_t *lock);
static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr,
	rte_spinlock_t *lock);
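
/*
 * Count how many entries in the supplied hw queue-pair table are
 * configured for the given service type.
 */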
int qat_qps_per_service(const struct qat_qp_hw_data *qp_hw_data,
		enum qat_service_type service)
{
	int i, count;

	for (i = 0, count = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++)
		if (qp_hw_data[i].service_type == service)
			count++;
	return count;
}

static const struct rte_memzone *
queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
			int socket_id)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_lookup(queue_name);
	if (mz != NULL) {
		if (((size_t)queue_size <= mz->len) &&
				((socket_id == SOCKET_ID_ANY) ||
					(socket_id == mz->socket_id))) {
			QAT_LOG(DEBUG, "re-use memzone already "
					"allocated for %s", queue_name);
			return mz;
		}

		QAT_LOG(ERR, "Incompatible memzone already "
				"allocated %s, size %u, socket %d. "
				"Requested size %u, socket %u",
				queue_name, (uint32_t)mz->len,
				mz->socket_id, queue_size, socket_id);
		return NULL;
	}

	QAT_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u",
					queue_name, queue_size, socket_id);
	return rte_memzone_reserve_aligned(queue_name, queue_size,
		socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size);
}
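
/*
 * Set up one queue pair: allocate the qp structure and the per-descriptor
 * op cookie array, create the tx and rx hardware rings, enable arbitration
 * on the tx ring and create (or re-use) the op cookie mempool.
 */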
int qat_qp_setup(struct qat_pci_device *qat_dev,
		struct qat_qp **qp_addr,
		uint16_t queue_pair_id,
		struct qat_qp_config *qat_qp_conf)
{
	struct qat_qp *qp;
	struct rte_pci_device *pci_dev = qat_dev->pci_dev;
	char op_cookie_pool_name[RTE_RING_NAMESIZE];
	uint32_t i;

	QAT_LOG(DEBUG, "Setup qp %u on qat pci device %d gen %d",
		queue_pair_id, qat_dev->qat_dev_id, qat_dev->qat_dev_gen);

	if ((qat_qp_conf->nb_descriptors > ADF_MAX_DESC) ||
		(qat_qp_conf->nb_descriptors < ADF_MIN_DESC)) {
		QAT_LOG(ERR, "Can't create qp for %u descriptors",
				qat_qp_conf->nb_descriptors);
		return -EINVAL;
	}

	if (pci_dev->mem_resource[0].addr == NULL) {
		QAT_LOG(ERR, "Could not find VF config space "
				"(UIO driver attached?).");
		return -EINVAL;
	}

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc("qat PMD qp metadata",
			sizeof(*qp), RTE_CACHE_LINE_SIZE);
	if (qp == NULL) {
		QAT_LOG(ERR, "Failed to alloc mem for qp struct");
		return -ENOMEM;
	}
	qp->nb_descriptors = qat_qp_conf->nb_descriptors;
	qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer",
			qat_qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
			RTE_CACHE_LINE_SIZE);
	if (qp->op_cookies == NULL) {
		QAT_LOG(ERR, "Failed to alloc mem for cookie");
		rte_free(qp);
		return -ENOMEM;
	}

	qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
	qp->inflights16 = 0;

	if (qat_queue_create(qat_dev, &(qp->tx_q), qat_qp_conf,
					ADF_RING_DIR_TX) != 0) {
		QAT_LOG(ERR, "Tx queue create failed "
				"queue_pair_id=%u", queue_pair_id);
		goto create_err;
	}

	if (qat_queue_create(qat_dev, &(qp->rx_q), qat_qp_conf,
					ADF_RING_DIR_RX) != 0) {
		QAT_LOG(ERR, "Rx queue create failed "
				"queue_pair_id=%hu", queue_pair_id);
		qat_queue_delete(&(qp->tx_q));
		goto create_err;
	}

	adf_configure_queues(qp);
	adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr,
					&qat_dev->arb_csr_lock);

	snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE,
			"%s%d_cookies_%s_qp%hu",
		pci_dev->driver->driver.name, qat_dev->qat_dev_id,
		qat_qp_conf->service_str, queue_pair_id);

	QAT_LOG(DEBUG, "cookiepool: %s", op_cookie_pool_name);
	qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
	if (qp->op_cookie_pool == NULL)
		qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
				qp->nb_descriptors,
				qat_qp_conf->cookie_size, 64, 0,
				NULL, NULL, NULL, NULL,
				qat_qp_conf->socket_id, 0);
	if (!qp->op_cookie_pool) {
		QAT_LOG(ERR, "QAT PMD Cannot create op mempool");
		goto create_err;
	}

	for (i = 0; i < qp->nb_descriptors; i++) {
		if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
			QAT_LOG(ERR, "QAT PMD Cannot get op_cookie");
			goto create_err;
		}
		memset(qp->op_cookies[i], 0, qat_qp_conf->cookie_size);
	}

	qp->qat_dev_gen = qat_dev->qat_dev_gen;
	qp->build_request = qat_qp_conf->build_request;
	qp->service_type = qat_qp_conf->hw->service_type;
	qp->qat_dev = qat_dev;

	QAT_LOG(DEBUG, "QP setup complete: id: %d, cookiepool: %s",
			queue_pair_id, op_cookie_pool_name);

	*qp_addr = qp;
	return 0;

create_err:
	if (qp->op_cookie_pool)
		rte_mempool_free(qp->op_cookie_pool);
	rte_free(qp->op_cookies);
	rte_free(qp);
	return -EFAULT;
}

int qat_qp_release(struct qat_qp **qp_addr)
{
	struct qat_qp *qp = *qp_addr;
	uint32_t i;

	if (qp == NULL) {
		QAT_LOG(DEBUG, "qp already freed");
		return 0;
	}

	QAT_LOG(DEBUG, "Free qp on qat_pci device %d",
				qp->qat_dev->qat_dev_id);

	/* Don't free memory if there are still responses to be processed */
	if (qp->inflights16 == 0) {
		qat_queue_delete(&(qp->tx_q));
		qat_queue_delete(&(qp->rx_q));
	} else {
		return -EAGAIN;
	}

	adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr,
					&qp->qat_dev->arb_csr_lock);

	for (i = 0; i < qp->nb_descriptors; i++)
		rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);

	if (qp->op_cookie_pool)
		rte_mempool_free(qp->op_cookie_pool);

	rte_free(qp->op_cookies);
	rte_free(qp);
	*qp_addr = NULL;
	return 0;
}
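
/*
 * Free a hardware ring: look up its memzone by name, overwrite the ring
 * memory with an unused pattern and release the memzone.
 */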
static void qat_queue_delete(struct qat_queue *queue)
{
	const struct rte_memzone *mz;
	int status = 0;

	if (queue == NULL) {
		QAT_LOG(DEBUG, "Invalid queue");
		return;
	}
	QAT_LOG(DEBUG, "Free ring %d, memzone: %s",
			queue->hw_queue_number, queue->memz_name);

	mz = rte_memzone_lookup(queue->memz_name);
	if (mz != NULL) {
		/* Write an unused pattern to the queue memory. */
		memset(queue->base_addr, 0x7F, queue->queue_size);
		status = rte_memzone_free(mz);
		if (status != 0)
			QAT_LOG(ERR, "Error %d on freeing queue %s",
					status, queue->memz_name);
	} else {
		QAT_LOG(DEBUG, "queue %s doesn't exist",
				queue->memz_name);
	}
}
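
/*
 * Create one hardware ring (tx or rx direction): reserve an IOVA-contiguous
 * memzone sized and aligned to the ring, check the ring base alignment and
 * the size against the CSR encoding, then program the ring base address
 * into the device.
 */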
static int
qat_queue_create(struct qat_pci_device *qat_dev, struct qat_queue *queue,
		struct qat_qp_config *qp_conf, uint8_t dir)
{
	uint64_t queue_base;
	void *io_addr;
	const struct rte_memzone *qp_mz;
	struct rte_pci_device *pci_dev = qat_dev->pci_dev;
	int ret = 0;
	uint16_t desc_size = (dir == ADF_RING_DIR_TX ?
			qp_conf->hw->tx_msg_size : qp_conf->hw->rx_msg_size);
	uint32_t queue_size_bytes = (qp_conf->nb_descriptors)*(desc_size);

	queue->hw_bundle_number = qp_conf->hw->hw_bundle_num;
	queue->hw_queue_number = (dir == ADF_RING_DIR_TX ?
			qp_conf->hw->tx_ring_num : qp_conf->hw->rx_ring_num);

	if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
		QAT_LOG(ERR, "Invalid descriptor size %d", desc_size);
		return -EINVAL;
	}

	/*
	 * Allocate a memzone for the queue - create a unique name.
	 */
	snprintf(queue->memz_name, sizeof(queue->memz_name),
			"%s_%d_%s_%s_%d_%d",
		pci_dev->driver->driver.name, qat_dev->qat_dev_id,
		qp_conf->service_str, "qp_mem",
		queue->hw_bundle_number, queue->hw_queue_number);
	qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
			qp_conf->socket_id);
	if (qp_mz == NULL) {
		QAT_LOG(ERR, "Failed to allocate ring memzone");
		return -ENOMEM;
	}

	queue->base_addr = (char *)qp_mz->addr;
	queue->base_phys_addr = qp_mz->iova;
	if (qat_qp_check_queue_alignment(queue->base_phys_addr,
			queue_size_bytes)) {
		QAT_LOG(ERR, "Invalid alignment on queue create "
					" 0x%"PRIx64"\n",
					queue->base_phys_addr);
		ret = -EFAULT;
		goto queue_create_err;
	}

	if (adf_verify_queue_size(desc_size, qp_conf->nb_descriptors,
			&(queue->queue_size)) != 0) {
		QAT_LOG(ERR, "Invalid num inflights");
		ret = -EINVAL;
		goto queue_create_err;
	}

	queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size,
					ADF_BYTES_TO_MSG_SIZE(desc_size));
	queue->modulo_mask = (1 << ADF_RING_SIZE_MODULO(queue->queue_size)) - 1;

	if (queue->max_inflights < 2) {
		QAT_LOG(ERR, "Invalid num inflights");
		ret = -EINVAL;
		goto queue_create_err;
	}
	queue->head = 0;
	queue->tail = 0;
	queue->msg_size = desc_size;

	/*
	 * Write an unused pattern to the queue memory.
	 */
	memset(queue->base_addr, 0x7F, queue_size_bytes);

	queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
					queue->queue_size);

	io_addr = pci_dev->mem_resource[0].addr;

	WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
			queue->hw_queue_number, queue_base);

	QAT_LOG(DEBUG, "RING: Name:%s, size in CSR: %u, in bytes %u,"
		" nb msgs %u, msg_size %u, max_inflights %u modulo mask %u",
			queue->memz_name,
			queue->queue_size, queue_size_bytes,
			qp_conf->nb_descriptors, desc_size,
			queue->max_inflights, queue->modulo_mask);

	return 0;

queue_create_err:
	rte_memzone_free(qp_mz);
	return ret;
}

static int qat_qp_check_queue_alignment(uint64_t phys_addr,
					uint32_t queue_size_bytes)
{
	if (((queue_size_bytes - 1) & phys_addr) != 0)
		return -EINVAL;
	return 0;
}

static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
	uint32_t *p_queue_size_for_csr)
{
	uint8_t i = ADF_MIN_RING_SIZE;

	for (; i <= ADF_MAX_RING_SIZE; i++)
		if ((msg_size * msg_num) ==
				(uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
			*p_queue_size_for_csr = i;
			return 0;
		}
	QAT_LOG(ERR, "Invalid ring size %d", msg_size * msg_num);
	return -EINVAL;
}

static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr,
					rte_spinlock_t *lock)
{
	uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
					(ADF_ARB_REG_SLOT *
					txq->hw_bundle_number);
	uint32_t value;

	rte_spinlock_lock(lock);
	value = ADF_CSR_RD(base_addr, arb_csr_offset);
	value |= (0x01 << txq->hw_queue_number);
	ADF_CSR_WR(base_addr, arb_csr_offset, value);
	rte_spinlock_unlock(lock);
}

static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr,
					rte_spinlock_t *lock)
{
	uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
					(ADF_ARB_REG_SLOT *
					txq->hw_bundle_number);
	uint32_t value;

	rte_spinlock_lock(lock);
	value = ADF_CSR_RD(base_addr, arb_csr_offset);
	value &= ~(0x01 << txq->hw_queue_number);
	ADF_CSR_WR(base_addr, arb_csr_offset, value);
	rte_spinlock_unlock(lock);
}

static void adf_configure_queues(struct qat_qp *qp)
{
	uint32_t queue_config;
	struct qat_queue *queue = &qp->tx_q;

	queue_config = BUILD_RING_CONFIG(queue->queue_size);

	WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
			queue->hw_queue_number, queue_config);

	queue = &qp->rx_q;
	queue_config =
			BUILD_RESP_RING_CONFIG(queue->queue_size,
					ADF_RING_NEAR_WATERMARK_512,
					ADF_RING_NEAR_WATERMARK_0);

	WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
			queue->hw_queue_number, queue_config);
}

static inline uint32_t adf_modulo(uint32_t data, uint32_t modulo_mask)
{
	return data & modulo_mask;
}
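
/*
 * Write the tx ring tail to the device CSR. The write is coalesced by the
 * enqueue/dequeue paths: it is only issued once enough requests are pending
 * or the number of in-flight messages has dropped low, and csr_tail tracks
 * the value last written to hardware.
 */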
static inline void
txq_write_tail(struct qat_qp *qp, struct qat_queue *q) {
	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,
			q->hw_queue_number, q->tail);
	q->nb_pending_requests = 0;
	q->csr_tail = q->tail;
}

static inline
void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
{
	uint32_t old_head, new_head;
	uint32_t max_head;

	old_head = q->csr_head;
	new_head = q->head;
	max_head = qp->nb_descriptors * q->msg_size;

	/* write out free descriptors */
	void *cur_desc = (uint8_t *)q->base_addr + old_head;

	if (new_head < old_head) {
		memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, max_head - old_head);
		memset(q->base_addr, ADF_RING_EMPTY_SIG_BYTE, new_head);
	} else {
		memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - old_head);
	}
	q->nb_processed_responses = 0;
	q->csr_head = new_head;

	/* write current head to CSR */
	WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
			q->hw_queue_number, new_head);
}

uint16_t
qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
{
	register struct qat_queue *queue;
	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
	register uint32_t nb_ops_sent = 0;
	register int ret;
	uint16_t nb_ops_possible = nb_ops;
	register uint8_t *base_addr;
	register uint32_t tail;
	int overflow;

	if (unlikely(nb_ops == 0))
		return 0;

	/* read params used a lot in main loop into registers */
	queue = &(tmp_qp->tx_q);
	base_addr = (uint8_t *)queue->base_addr;
	tail = queue->tail;

	/* Find how many can actually fit on the ring */
	tmp_qp->inflights16 += nb_ops;
	overflow = tmp_qp->inflights16 - queue->max_inflights;
	if (overflow > 0) {
		tmp_qp->inflights16 -= overflow;
		nb_ops_possible = nb_ops - overflow;
		if (nb_ops_possible == 0)
			return 0;
	}

	while (nb_ops_sent != nb_ops_possible) {
		ret = tmp_qp->build_request(*ops, base_addr + tail,
				tmp_qp->op_cookies[tail / queue->msg_size],
				tmp_qp->qat_dev_gen);
		if (ret != 0) {
			tmp_qp->stats.enqueue_err_count++;
			/*
			 * This message cannot be enqueued,
			 * decrease the number of ops that will be sent
			 */
			tmp_qp->inflights16 -= nb_ops_possible - nb_ops_sent;
			if (nb_ops_sent == 0)
				return 0;
			goto kick_tail;
		}

		tail = adf_modulo(tail + queue->msg_size, queue->modulo_mask);
		ops++;
		nb_ops_sent++;
	}
kick_tail:
	queue->tail = tail;
	tmp_qp->stats.enqueued_count += nb_ops_sent;
	queue->nb_pending_requests += nb_ops_sent;
	if (tmp_qp->inflights16 < QAT_CSR_TAIL_FORCE_WRITE_THRESH ||
		queue->nb_pending_requests > QAT_CSR_TAIL_WRITE_THRESH) {
		txq_write_tail(tmp_qp, queue);
	}
	return nb_ops_sent;
}
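
/*
 * Dequeue up to nb_ops responses from the rx ring and hand each one to the
 * service-specific response handler. The rx head CSR is written back only
 * after QAT_CSR_HEAD_WRITE_THRESH responses have accumulated, and a pending
 * tx tail update is flushed once the ring has drained enough.
 */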
uint16_t
qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
{
	struct qat_queue *rx_queue, *tx_queue;
	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
	uint32_t head;
	uint32_t resp_counter = 0;
	uint8_t *resp_msg;

	rx_queue = &(tmp_qp->rx_q);
	tx_queue = &(tmp_qp->tx_q);
	head = rx_queue->head;
	resp_msg = (uint8_t *)rx_queue->base_addr + rx_queue->head;

	while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
			resp_counter != nb_ops) {

		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
			qat_sym_process_response(ops, resp_msg);
		else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
			qat_comp_process_response(ops, resp_msg,
					&tmp_qp->stats.dequeue_err_count);
		else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC) {
#ifdef BUILD_QAT_ASYM
			qat_asym_process_response(ops, resp_msg,
				tmp_qp->op_cookies[head / rx_queue->msg_size]);
#endif
		}

		head = adf_modulo(head + rx_queue->msg_size,
				rx_queue->modulo_mask);

		resp_msg = (uint8_t *)rx_queue->base_addr + head;
		ops++;
		resp_counter++;
	}
	if (resp_counter > 0) {
		rx_queue->head = head;
		tmp_qp->stats.dequeued_count += resp_counter;
		rx_queue->nb_processed_responses += resp_counter;
		tmp_qp->inflights16 -= resp_counter;

		if (rx_queue->nb_processed_responses >
						QAT_CSR_HEAD_WRITE_THRESH)
			rxq_free_desc(tmp_qp, rx_queue);
	}
	/* also check if tail needs to be advanced */
	if (tmp_qp->inflights16 <= QAT_CSR_TAIL_FORCE_WRITE_THRESH &&
		tx_queue->tail != tx_queue->csr_tail) {
		txq_write_tail(tmp_qp, tx_queue);
	}
	return resp_counter;
}

__rte_weak int
qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
			  uint64_t *dequeue_err_count __rte_unused)
{
	return 0;
}