1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2018-2019 Intel Corporation 3 */ 4 5 #include <rte_mempool.h> 6 #include <rte_mbuf.h> 7 #include <rte_hexdump.h> 8 #include <rte_comp.h> 9 #include <rte_bus_pci.h> 10 #include <rte_byteorder.h> 11 #include <rte_memcpy.h> 12 #include <rte_common.h> 13 #include <rte_spinlock.h> 14 #include <rte_log.h> 15 #include <rte_malloc.h> 16 #include <rte_memzone.h> 17 18 #include "qat_logs.h" 19 #include "qat_comp.h" 20 #include "qat_comp_pmd.h" 21 22 static void 23 qat_comp_fallback_to_fixed(struct icp_qat_fw_comp_req *comp_req) 24 { 25 QAT_DP_LOG(DEBUG, "QAT PMD: fallback to fixed compression!"); 26 27 comp_req->comn_hdr.service_cmd_id = 28 ICP_QAT_FW_COMP_CMD_STATIC; 29 30 ICP_QAT_FW_COMN_NEXT_ID_SET( 31 &comp_req->comp_cd_ctrl, 32 ICP_QAT_FW_SLICE_DRAM_WR); 33 34 ICP_QAT_FW_COMN_NEXT_ID_SET( 35 &comp_req->u2.xlt_cd_ctrl, 36 ICP_QAT_FW_SLICE_NULL); 37 ICP_QAT_FW_COMN_CURR_ID_SET( 38 &comp_req->u2.xlt_cd_ctrl, 39 ICP_QAT_FW_SLICE_NULL); 40 } 41 42 void 43 qat_comp_free_split_op_memzones(struct qat_comp_op_cookie *cookie, 44 unsigned int nb_children) 45 { 46 unsigned int i; 47 48 /* free all memzones allocated for child descriptors */ 49 for (i = 0; i < nb_children; i++) 50 rte_memzone_free(cookie->dst_memzones[i]); 51 52 /* and free the pointer table */ 53 rte_free(cookie->dst_memzones); 54 cookie->dst_memzones = NULL; 55 } 56 57 static int 58 qat_comp_allocate_split_op_memzones(struct qat_comp_op_cookie *cookie, 59 unsigned int nb_descriptors_needed) 60 { 61 struct qat_queue *txq = &(cookie->qp->tx_q); 62 char dst_memz_name[RTE_MEMZONE_NAMESIZE]; 63 unsigned int i; 64 65 /* allocate the array of memzone pointers */ 66 cookie->dst_memzones = rte_zmalloc_socket("qat PMD im buf mz pointers", 67 (nb_descriptors_needed - 1) * 68 sizeof(const struct rte_memzone *), 69 RTE_CACHE_LINE_SIZE, cookie->socket_id); 70 71 if (cookie->dst_memzones == NULL) { 72 QAT_DP_LOG(ERR, 73 "QAT PMD: failed to allocate im buf mz pointers"); 74 return -ENOMEM; 75 } 76 77 for (i = 0; i < nb_descriptors_needed - 1; i++) { 78 snprintf(dst_memz_name, 79 sizeof(dst_memz_name), 80 "dst_%u_%u_%u_%u_%u", 81 cookie->qp->qat_dev->qat_dev_id, 82 txq->hw_bundle_number, txq->hw_queue_number, 83 cookie->cookie_index, i); 84 85 cookie->dst_memzones[i] = rte_memzone_reserve_aligned( 86 dst_memz_name, RTE_PMD_QAT_COMP_IM_BUFFER_SIZE, 87 cookie->socket_id, RTE_MEMZONE_IOVA_CONTIG, 88 RTE_CACHE_LINE_SIZE); 89 90 if (cookie->dst_memzones[i] == NULL) { 91 QAT_DP_LOG(ERR, 92 "QAT PMD: failed to allocate dst buffer memzone"); 93 94 /* let's free all memzones allocated up to now */ 95 qat_comp_free_split_op_memzones(cookie, i); 96 97 return -ENOMEM; 98 } 99 } 100 101 return 0; 102 } 103 104 int 105 qat_comp_build_request(void *in_op, uint8_t *out_msg, 106 void *op_cookie, 107 enum qat_device_gen qat_dev_gen __rte_unused) 108 { 109 struct rte_comp_op *op = in_op; 110 struct qat_comp_op_cookie *cookie = 111 (struct qat_comp_op_cookie *)op_cookie; 112 struct qat_comp_stream *stream; 113 struct qat_comp_xform *qat_xform; 114 const uint8_t *tmpl; 115 struct icp_qat_fw_comp_req *comp_req = 116 (struct icp_qat_fw_comp_req *)out_msg; 117 118 if (op->op_type == RTE_COMP_OP_STATEFUL) { 119 stream = op->stream; 120 qat_xform = &stream->qat_xform; 121 if (unlikely(qat_xform->qat_comp_request_type != 122 QAT_COMP_REQUEST_DECOMPRESS)) { 123 QAT_DP_LOG(ERR, "QAT PMD does not support stateful compression"); 124 op->status = RTE_COMP_OP_STATUS_INVALID_ARGS; 125 return -EINVAL; 126 } 127 if (unlikely(stream->op_in_progress)) { 128 QAT_DP_LOG(ERR, "QAT PMD does not support running multiple stateful operations on the same stream at once"); 129 op->status = RTE_COMP_OP_STATUS_INVALID_STATE; 130 return -EINVAL; 131 } 132 stream->op_in_progress = 1; 133 } else { 134 stream = NULL; 135 qat_xform = op->private_xform; 136 } 137 tmpl = (uint8_t *)&qat_xform->qat_comp_req_tmpl; 138 139 rte_mov128(out_msg, tmpl); 140 comp_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op; 141 142 if (likely(qat_xform->qat_comp_request_type == 143 QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)) { 144 145 if (unlikely(op->src.length > QAT_FALLBACK_THLD)) { 146 /* the operation must be split into pieces */ 147 if (qat_xform->checksum_type != 148 RTE_COMP_CHECKSUM_NONE) { 149 /* fallback to fixed compression in case any 150 * checksum calculation was requested 151 */ 152 qat_comp_fallback_to_fixed(comp_req); 153 } else { 154 /* calculate num. of descriptors for split op */ 155 unsigned int nb_descriptors_needed = 156 op->src.length / QAT_FALLBACK_THLD + 1; 157 /* allocate memzone for output data */ 158 if (qat_comp_allocate_split_op_memzones( 159 cookie, nb_descriptors_needed)) { 160 /* out of memory, fallback to fixed */ 161 qat_comp_fallback_to_fixed(comp_req); 162 } else { 163 QAT_DP_LOG(DEBUG, 164 "Input data is too big, op must be split into %u descriptors", 165 nb_descriptors_needed); 166 return (int) nb_descriptors_needed; 167 } 168 } 169 } 170 171 /* set BFINAL bit according to flush_flag */ 172 comp_req->comp_pars.req_par_flags = 173 ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD( 174 ICP_QAT_FW_COMP_SOP, 175 ICP_QAT_FW_COMP_EOP, 176 op->flush_flag == RTE_COMP_FLUSH_FINAL ? 177 ICP_QAT_FW_COMP_BFINAL 178 : ICP_QAT_FW_COMP_NOT_BFINAL, 179 ICP_QAT_FW_COMP_CNV, 180 ICP_QAT_FW_COMP_CNV_RECOVERY); 181 182 } else if (op->op_type == RTE_COMP_OP_STATEFUL) { 183 184 comp_req->comp_pars.req_par_flags = 185 ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD( 186 (stream->start_of_packet) ? 187 ICP_QAT_FW_COMP_SOP 188 : ICP_QAT_FW_COMP_NOT_SOP, 189 (op->flush_flag == RTE_COMP_FLUSH_FULL || 190 op->flush_flag == RTE_COMP_FLUSH_FINAL) ? 191 ICP_QAT_FW_COMP_EOP 192 : ICP_QAT_FW_COMP_NOT_EOP, 193 ICP_QAT_FW_COMP_NOT_BFINAL, 194 ICP_QAT_FW_COMP_CNV, 195 ICP_QAT_FW_COMP_CNV_RECOVERY); 196 } 197 198 /* common for sgl and flat buffers */ 199 comp_req->comp_pars.comp_len = op->src.length; 200 comp_req->comp_pars.out_buffer_sz = rte_pktmbuf_pkt_len(op->m_dst) - 201 op->dst.offset; 202 203 if (op->m_src->next != NULL || op->m_dst->next != NULL) { 204 /* sgl */ 205 int ret = 0; 206 207 ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags, 208 QAT_COMN_PTR_TYPE_SGL); 209 210 if (unlikely(op->m_src->nb_segs > cookie->src_nb_elems)) { 211 /* we need to allocate more elements in SGL*/ 212 void *tmp; 213 214 tmp = rte_realloc_socket(cookie->qat_sgl_src_d, 215 sizeof(struct qat_sgl) + 216 sizeof(struct qat_flat_buf) * 217 op->m_src->nb_segs, 64, 218 cookie->socket_id); 219 220 if (unlikely(tmp == NULL)) { 221 QAT_DP_LOG(ERR, "QAT PMD can't allocate memory" 222 " for %d elements of SGL", 223 op->m_src->nb_segs); 224 op->status = RTE_COMP_OP_STATUS_ERROR; 225 /* clear op-in-progress flag */ 226 if (stream) 227 stream->op_in_progress = 0; 228 return -ENOMEM; 229 } 230 /* new SGL is valid now */ 231 cookie->qat_sgl_src_d = (struct qat_sgl *)tmp; 232 cookie->src_nb_elems = op->m_src->nb_segs; 233 cookie->qat_sgl_src_phys_addr = 234 rte_malloc_virt2iova(cookie->qat_sgl_src_d); 235 } 236 237 ret = qat_sgl_fill_array(op->m_src, 238 op->src.offset, 239 cookie->qat_sgl_src_d, 240 op->src.length, 241 cookie->src_nb_elems); 242 if (ret) { 243 QAT_DP_LOG(ERR, "QAT PMD Cannot fill source sgl array"); 244 op->status = RTE_COMP_OP_STATUS_INVALID_ARGS; 245 /* clear op-in-progress flag */ 246 if (stream) 247 stream->op_in_progress = 0; 248 return ret; 249 } 250 251 if (unlikely(op->m_dst->nb_segs > cookie->dst_nb_elems)) { 252 /* we need to allocate more elements in SGL*/ 253 struct qat_sgl *tmp; 254 255 tmp = rte_realloc_socket(cookie->qat_sgl_dst_d, 256 sizeof(struct qat_sgl) + 257 sizeof(struct qat_flat_buf) * 258 op->m_dst->nb_segs, 64, 259 cookie->socket_id); 260 261 if (unlikely(tmp == NULL)) { 262 QAT_DP_LOG(ERR, "QAT PMD can't allocate memory" 263 " for %d elements of SGL", 264 op->m_dst->nb_segs); 265 op->status = RTE_COMP_OP_STATUS_ERROR; 266 /* clear op-in-progress flag */ 267 if (stream) 268 stream->op_in_progress = 0; 269 return -ENOMEM; 270 } 271 /* new SGL is valid now */ 272 cookie->qat_sgl_dst_d = (struct qat_sgl *)tmp; 273 cookie->dst_nb_elems = op->m_dst->nb_segs; 274 cookie->qat_sgl_dst_phys_addr = 275 rte_malloc_virt2iova(cookie->qat_sgl_dst_d); 276 } 277 278 ret = qat_sgl_fill_array(op->m_dst, 279 op->dst.offset, 280 cookie->qat_sgl_dst_d, 281 comp_req->comp_pars.out_buffer_sz, 282 cookie->dst_nb_elems); 283 if (ret) { 284 QAT_DP_LOG(ERR, "QAT PMD Cannot fill dest. sgl array"); 285 op->status = RTE_COMP_OP_STATUS_INVALID_ARGS; 286 /* clear op-in-progress flag */ 287 if (stream) 288 stream->op_in_progress = 0; 289 return ret; 290 } 291 292 comp_req->comn_mid.src_data_addr = 293 cookie->qat_sgl_src_phys_addr; 294 comp_req->comn_mid.dest_data_addr = 295 cookie->qat_sgl_dst_phys_addr; 296 comp_req->comn_mid.src_length = 0; 297 comp_req->comn_mid.dst_length = 0; 298 299 } else { 300 /* flat aka linear buffer */ 301 ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags, 302 QAT_COMN_PTR_TYPE_FLAT); 303 comp_req->comn_mid.src_length = op->src.length; 304 comp_req->comn_mid.dst_length = 305 comp_req->comp_pars.out_buffer_sz; 306 307 comp_req->comn_mid.src_data_addr = 308 rte_pktmbuf_iova_offset(op->m_src, op->src.offset); 309 comp_req->comn_mid.dest_data_addr = 310 rte_pktmbuf_iova_offset(op->m_dst, op->dst.offset); 311 } 312 313 if (unlikely(rte_pktmbuf_pkt_len(op->m_dst) < QAT_MIN_OUT_BUF_SIZE)) { 314 /* QAT doesn't support dest. buffer lower 315 * than QAT_MIN_OUT_BUF_SIZE. Propagate error mark 316 * by converting this request to the null one 317 * and check the status in the response. 318 */ 319 QAT_DP_LOG(WARNING, "QAT destination buffer too small - resend with larger buffer"); 320 comp_req->comn_hdr.service_type = ICP_QAT_FW_COMN_REQ_NULL; 321 comp_req->comn_hdr.service_cmd_id = ICP_QAT_FW_NULL_REQ_SERV_ID; 322 cookie->error = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED; 323 } 324 325 #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG 326 QAT_DP_LOG(DEBUG, "Direction: %s", 327 qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ? 328 "decompression" : "compression"); 329 QAT_DP_HEXDUMP_LOG(DEBUG, "qat compression message:", comp_req, 330 sizeof(struct icp_qat_fw_comp_req)); 331 #endif 332 return 0; 333 } 334 335 static inline uint32_t adf_modulo(uint32_t data, uint32_t modulo_mask) 336 { 337 return data & modulo_mask; 338 } 339 340 static inline void 341 qat_comp_mbuf_skip(struct rte_mbuf **mbuf, uint32_t *offset, uint32_t len) 342 { 343 while (*offset + len >= rte_pktmbuf_data_len(*mbuf)) { 344 len -= (rte_pktmbuf_data_len(*mbuf) - *offset); 345 *mbuf = (*mbuf)->next; 346 *offset = 0; 347 } 348 *offset = len; 349 } 350 351 int 352 qat_comp_build_multiple_requests(void *in_op, struct qat_qp *qp, 353 uint32_t parent_tail, int nb_descr) 354 { 355 struct rte_comp_op op_backup; 356 struct rte_mbuf dst_mbuf; 357 struct rte_comp_op *op = in_op; 358 struct qat_queue *txq = &(qp->tx_q); 359 uint8_t *base_addr = (uint8_t *)txq->base_addr; 360 uint8_t *out_msg = base_addr + parent_tail; 361 uint32_t tail = parent_tail; 362 struct icp_qat_fw_comp_req *comp_req = 363 (struct icp_qat_fw_comp_req *)out_msg; 364 struct qat_comp_op_cookie *parent_cookie = 365 (struct qat_comp_op_cookie *) 366 qp->op_cookies[parent_tail / txq->msg_size]; 367 struct qat_comp_op_cookie *child_cookie; 368 uint16_t dst_data_size = 369 RTE_MIN(RTE_PMD_QAT_COMP_IM_BUFFER_SIZE, 65535); 370 uint32_t data_to_enqueue = op->src.length - QAT_FALLBACK_THLD; 371 int num_descriptors_built = 1; 372 int ret; 373 374 QAT_DP_LOG(DEBUG, "op %p, parent_cookie %p", op, parent_cookie); 375 376 /* copy original op to the local variable for restoring later */ 377 rte_memcpy(&op_backup, op, sizeof(op_backup)); 378 379 parent_cookie->nb_child_responses = 0; 380 parent_cookie->nb_children = 0; 381 parent_cookie->split_op = 1; 382 parent_cookie->dst_data = op->m_dst; 383 parent_cookie->dst_data_offset = op->dst.offset; 384 385 op->src.length = QAT_FALLBACK_THLD; 386 op->flush_flag = RTE_COMP_FLUSH_FULL; 387 388 QAT_DP_LOG(DEBUG, "parent op src len %u dst len %u", 389 op->src.length, op->m_dst->pkt_len); 390 391 ret = qat_comp_build_request(in_op, out_msg, parent_cookie, 392 qp->qat_dev_gen); 393 if (ret != 0) { 394 /* restore op and clear cookie */ 395 QAT_DP_LOG(WARNING, "Failed to build parent descriptor"); 396 op->src.length = op_backup.src.length; 397 op->flush_flag = op_backup.flush_flag; 398 parent_cookie->split_op = 0; 399 return ret; 400 } 401 402 /* prepare local dst mbuf */ 403 rte_memcpy(&dst_mbuf, op->m_dst, sizeof(dst_mbuf)); 404 rte_pktmbuf_reset(&dst_mbuf); 405 dst_mbuf.buf_len = dst_data_size; 406 dst_mbuf.data_len = dst_data_size; 407 dst_mbuf.pkt_len = dst_data_size; 408 dst_mbuf.data_off = 0; 409 410 /* update op for the child operations */ 411 op->m_dst = &dst_mbuf; 412 op->dst.offset = 0; 413 414 while (data_to_enqueue) { 415 const struct rte_memzone *mz = 416 parent_cookie->dst_memzones[num_descriptors_built - 1]; 417 uint32_t src_data_size = RTE_MIN(data_to_enqueue, 418 QAT_FALLBACK_THLD); 419 uint32_t cookie_index; 420 421 /* update params for the next op */ 422 op->src.offset += QAT_FALLBACK_THLD; 423 op->src.length = src_data_size; 424 op->flush_flag = (src_data_size == data_to_enqueue) ? 425 op_backup.flush_flag : RTE_COMP_FLUSH_FULL; 426 427 /* update dst mbuf for the next op (use memzone for dst data) */ 428 dst_mbuf.buf_addr = mz->addr; 429 dst_mbuf.buf_iova = mz->iova; 430 431 /* move the tail and calculate next cookie index */ 432 tail = adf_modulo(tail + txq->msg_size, txq->modulo_mask); 433 cookie_index = tail / txq->msg_size; 434 child_cookie = (struct qat_comp_op_cookie *) 435 qp->op_cookies[cookie_index]; 436 comp_req = (struct icp_qat_fw_comp_req *)(base_addr + tail); 437 438 /* update child cookie */ 439 child_cookie->split_op = 1; /* must be set for child as well */ 440 child_cookie->parent_cookie = parent_cookie; /* same as above */ 441 child_cookie->nb_children = 0; 442 child_cookie->dest_buffer = mz->addr; 443 444 QAT_DP_LOG(DEBUG, 445 "cookie_index %u, child_cookie %p, comp_req %p", 446 cookie_index, child_cookie, comp_req); 447 QAT_DP_LOG(DEBUG, 448 "data_to_enqueue %u, num_descriptors_built %d", 449 data_to_enqueue, num_descriptors_built); 450 QAT_DP_LOG(DEBUG, "child op src len %u dst len %u", 451 op->src.length, op->m_dst->pkt_len); 452 453 /* build the request */ 454 ret = qat_comp_build_request(op, (uint8_t *)comp_req, 455 child_cookie, qp->qat_dev_gen); 456 if (ret < 0) { 457 QAT_DP_LOG(WARNING, "Failed to build child descriptor"); 458 /* restore op and clear cookie */ 459 rte_memcpy(op, &op_backup, sizeof(op_backup)); 460 parent_cookie->split_op = 0; 461 parent_cookie->nb_children = 0; 462 return ret; 463 } 464 465 data_to_enqueue -= src_data_size; 466 num_descriptors_built++; 467 } 468 469 /* restore backed up original op */ 470 rte_memcpy(op, &op_backup, sizeof(op_backup)); 471 472 if (nb_descr != num_descriptors_built) 473 QAT_DP_LOG(ERR, "split op. expected %d, built %d", 474 nb_descr, num_descriptors_built); 475 476 parent_cookie->nb_children = num_descriptors_built - 1; 477 return num_descriptors_built; 478 } 479 480 static inline void 481 qat_comp_response_data_copy(struct qat_comp_op_cookie *cookie, 482 struct rte_comp_op *rx_op) 483 { 484 struct qat_comp_op_cookie *pc = cookie->parent_cookie; 485 struct rte_mbuf *sgl_buf = pc->dst_data; 486 void *op_dst_addr = rte_pktmbuf_mtod_offset(sgl_buf, uint8_t *, 487 pc->dst_data_offset); 488 489 /* number of bytes left in the current segment */ 490 uint32_t left_in_current = rte_pktmbuf_data_len(sgl_buf) - 491 pc->dst_data_offset; 492 493 uint32_t prod, sent; 494 495 if (rx_op->produced <= left_in_current) { 496 rte_memcpy(op_dst_addr, cookie->dest_buffer, 497 rx_op->produced); 498 /* calculate dst mbuf and offset for the next child op */ 499 if (rx_op->produced == left_in_current) { 500 pc->dst_data = sgl_buf->next; 501 pc->dst_data_offset = 0; 502 } else 503 pc->dst_data_offset += rx_op->produced; 504 } else { 505 rte_memcpy(op_dst_addr, cookie->dest_buffer, 506 left_in_current); 507 sgl_buf = sgl_buf->next; 508 prod = rx_op->produced - left_in_current; 509 sent = left_in_current; 510 while (prod > rte_pktmbuf_data_len(sgl_buf)) { 511 op_dst_addr = rte_pktmbuf_mtod_offset(sgl_buf, 512 uint8_t *, 0); 513 514 rte_memcpy(op_dst_addr, 515 ((uint8_t *)cookie->dest_buffer) + 516 sent, 517 rte_pktmbuf_data_len(sgl_buf)); 518 519 prod -= rte_pktmbuf_data_len(sgl_buf); 520 sent += rte_pktmbuf_data_len(sgl_buf); 521 522 sgl_buf = sgl_buf->next; 523 } 524 525 op_dst_addr = rte_pktmbuf_mtod_offset(sgl_buf, uint8_t *, 0); 526 527 rte_memcpy(op_dst_addr, 528 ((uint8_t *)cookie->dest_buffer) + sent, 529 prod); 530 531 /* calculate dst mbuf and offset for the next child op */ 532 if (prod == rte_pktmbuf_data_len(sgl_buf)) { 533 pc->dst_data = sgl_buf->next; 534 pc->dst_data_offset = 0; 535 } else { 536 pc->dst_data = sgl_buf; 537 pc->dst_data_offset = prod; 538 } 539 } 540 } 541 542 int 543 qat_comp_process_response(void **op, uint8_t *resp, void *op_cookie, 544 uint64_t *dequeue_err_count) 545 { 546 struct icp_qat_fw_comp_resp *resp_msg = 547 (struct icp_qat_fw_comp_resp *)resp; 548 struct qat_comp_op_cookie *cookie = 549 (struct qat_comp_op_cookie *)op_cookie; 550 551 struct icp_qat_fw_resp_comp_pars *comp_resp1 = 552 (struct icp_qat_fw_resp_comp_pars *)&resp_msg->comp_resp_pars; 553 554 QAT_DP_LOG(DEBUG, "input counter = %u, output counter = %u", 555 comp_resp1->input_byte_counter, 556 comp_resp1->output_byte_counter); 557 558 struct rte_comp_op *rx_op = (struct rte_comp_op *)(uintptr_t) 559 (resp_msg->opaque_data); 560 struct qat_comp_stream *stream; 561 struct qat_comp_xform *qat_xform; 562 int err = resp_msg->comn_resp.comn_status & 563 ((1 << QAT_COMN_RESP_CMP_STATUS_BITPOS) | 564 (1 << QAT_COMN_RESP_XLAT_STATUS_BITPOS)); 565 566 if (rx_op->op_type == RTE_COMP_OP_STATEFUL) { 567 stream = rx_op->stream; 568 qat_xform = &stream->qat_xform; 569 /* clear op-in-progress flag */ 570 stream->op_in_progress = 0; 571 } else { 572 stream = NULL; 573 qat_xform = rx_op->private_xform; 574 } 575 576 #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG 577 QAT_DP_LOG(DEBUG, "Direction: %s", 578 qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ? 579 "decompression" : "compression"); 580 QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg, 581 sizeof(struct icp_qat_fw_comp_resp)); 582 #endif 583 584 if (unlikely(cookie->error)) { 585 rx_op->status = cookie->error; 586 cookie->error = 0; 587 ++(*dequeue_err_count); 588 rx_op->debug_status = 0; 589 rx_op->consumed = 0; 590 rx_op->produced = 0; 591 *op = (void *)rx_op; 592 /* also in this case number of returned ops */ 593 /* must be equal to one, */ 594 /* appropriate status (error) must be set as well */ 595 return 1; 596 } 597 598 if (likely(qat_xform->qat_comp_request_type 599 != QAT_COMP_REQUEST_DECOMPRESS)) { 600 if (unlikely(ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET( 601 resp_msg->comn_resp.hdr_flags) 602 == ICP_QAT_FW_COMP_NO_CNV)) { 603 rx_op->status = RTE_COMP_OP_STATUS_ERROR; 604 rx_op->debug_status = ERR_CODE_QAT_COMP_WRONG_FW; 605 *op = (void *)rx_op; 606 QAT_DP_LOG(ERR, 607 "This QAT hardware doesn't support compression operation"); 608 ++(*dequeue_err_count); 609 return 1; 610 } 611 } 612 613 if (err) { 614 if (unlikely((err & (1 << QAT_COMN_RESP_XLAT_STATUS_BITPOS)) 615 && (qat_xform->qat_comp_request_type 616 == QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS))) { 617 QAT_DP_LOG(ERR, "QAT intermediate buffer may be too " 618 "small for output, try configuring a larger size"); 619 } 620 621 int8_t cmp_err_code = 622 (int8_t)resp_msg->comn_resp.comn_error.cmp_err_code; 623 int8_t xlat_err_code = 624 (int8_t)resp_msg->comn_resp.comn_error.xlat_err_code; 625 626 /* handle recoverable out-of-buffer condition in stateful 627 * decompression scenario 628 */ 629 if (cmp_err_code == ERR_CODE_OVERFLOW_ERROR && !xlat_err_code 630 && qat_xform->qat_comp_request_type 631 == QAT_COMP_REQUEST_DECOMPRESS 632 && rx_op->op_type == RTE_COMP_OP_STATEFUL) { 633 struct icp_qat_fw_resp_comp_pars *comp_resp = 634 &resp_msg->comp_resp_pars; 635 rx_op->status = 636 RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE; 637 rx_op->consumed = comp_resp->input_byte_counter; 638 rx_op->produced = comp_resp->output_byte_counter; 639 stream->start_of_packet = 0; 640 } else if ((cmp_err_code == ERR_CODE_OVERFLOW_ERROR 641 && !xlat_err_code) 642 || 643 (!cmp_err_code && xlat_err_code == ERR_CODE_OVERFLOW_ERROR) 644 || 645 (cmp_err_code == ERR_CODE_OVERFLOW_ERROR && 646 xlat_err_code == ERR_CODE_OVERFLOW_ERROR)){ 647 648 struct icp_qat_fw_resp_comp_pars *comp_resp = 649 (struct icp_qat_fw_resp_comp_pars *) 650 &resp_msg->comp_resp_pars; 651 652 /* handle recoverable out-of-buffer condition 653 * in stateless compression scenario 654 */ 655 if (comp_resp->input_byte_counter) { 656 if ((qat_xform->qat_comp_request_type 657 == QAT_COMP_REQUEST_FIXED_COMP_STATELESS) || 658 (qat_xform->qat_comp_request_type 659 == QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)) { 660 661 rx_op->status = 662 RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE; 663 rx_op->consumed = 664 comp_resp->input_byte_counter; 665 rx_op->produced = 666 comp_resp->output_byte_counter; 667 } else 668 rx_op->status = 669 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED; 670 } else 671 rx_op->status = 672 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED; 673 } else 674 rx_op->status = RTE_COMP_OP_STATUS_ERROR; 675 676 ++(*dequeue_err_count); 677 rx_op->debug_status = 678 *((uint16_t *)(&resp_msg->comn_resp.comn_error)); 679 } else { 680 struct icp_qat_fw_resp_comp_pars *comp_resp = 681 (struct icp_qat_fw_resp_comp_pars *)&resp_msg->comp_resp_pars; 682 683 rx_op->status = RTE_COMP_OP_STATUS_SUCCESS; 684 rx_op->consumed = comp_resp->input_byte_counter; 685 rx_op->produced = comp_resp->output_byte_counter; 686 if (stream) 687 stream->start_of_packet = 0; 688 689 if (qat_xform->checksum_type != RTE_COMP_CHECKSUM_NONE) { 690 if (qat_xform->checksum_type == RTE_COMP_CHECKSUM_CRC32) 691 rx_op->output_chksum = comp_resp->curr_crc32; 692 else if (qat_xform->checksum_type == 693 RTE_COMP_CHECKSUM_ADLER32) 694 rx_op->output_chksum = comp_resp->curr_adler_32; 695 else 696 rx_op->output_chksum = comp_resp->curr_chksum; 697 } 698 } 699 QAT_DP_LOG(DEBUG, "About to check for split op :cookies: %p %p, split:%u", 700 cookie, cookie->parent_cookie, cookie->split_op); 701 702 if (cookie->split_op) { 703 *op = NULL; 704 struct qat_comp_op_cookie *pc = cookie->parent_cookie; 705 706 if (cookie->nb_children > 0) { 707 QAT_DP_LOG(DEBUG, "Parent"); 708 /* parent - don't return until all children 709 * responses are collected 710 */ 711 cookie->total_consumed = rx_op->consumed; 712 cookie->total_produced = rx_op->produced; 713 if (err) { 714 cookie->error = rx_op->status; 715 rx_op->status = RTE_COMP_OP_STATUS_SUCCESS; 716 } else { 717 /* calculate dst mbuf and offset for child op */ 718 qat_comp_mbuf_skip(&cookie->dst_data, 719 &cookie->dst_data_offset, 720 rx_op->produced); 721 } 722 } else { 723 QAT_DP_LOG(DEBUG, "Child"); 724 if (pc->error == RTE_COMP_OP_STATUS_SUCCESS) { 725 if (err) 726 pc->error = rx_op->status; 727 if (rx_op->produced) { 728 /* this covers both SUCCESS and 729 * OUT_OF_SPACE_RECOVERABLE cases 730 */ 731 qat_comp_response_data_copy(cookie, 732 rx_op); 733 pc->total_consumed += rx_op->consumed; 734 pc->total_produced += rx_op->produced; 735 } 736 } 737 rx_op->status = RTE_COMP_OP_STATUS_SUCCESS; 738 739 pc->nb_child_responses++; 740 741 /* (child) cookie fields have to be reset 742 * to avoid problems with reusability - 743 * rx and tx queue starting from index zero 744 */ 745 cookie->nb_children = 0; 746 cookie->split_op = 0; 747 cookie->nb_child_responses = 0; 748 cookie->dest_buffer = NULL; 749 750 if (pc->nb_child_responses == pc->nb_children) { 751 uint8_t child_resp; 752 753 /* parent should be included as well */ 754 child_resp = pc->nb_child_responses + 1; 755 756 rx_op->status = pc->error; 757 rx_op->consumed = pc->total_consumed; 758 rx_op->produced = pc->total_produced; 759 *op = (void *)rx_op; 760 761 /* free memzones used for dst data */ 762 qat_comp_free_split_op_memzones(pc, 763 pc->nb_children); 764 765 /* (parent) cookie fields have to be reset 766 * to avoid problems with reusability - 767 * rx and tx queue starting from index zero 768 */ 769 pc->nb_children = 0; 770 pc->split_op = 0; 771 pc->nb_child_responses = 0; 772 pc->error = RTE_COMP_OP_STATUS_SUCCESS; 773 774 return child_resp; 775 } 776 } 777 return 0; 778 } 779 780 *op = (void *)rx_op; 781 return 1; 782 } 783 784 unsigned int 785 qat_comp_xform_size(void) 786 { 787 return RTE_ALIGN_CEIL(sizeof(struct qat_comp_xform), 8); 788 } 789 790 unsigned int 791 qat_comp_stream_size(void) 792 { 793 return RTE_ALIGN_CEIL(sizeof(struct qat_comp_stream), 8); 794 } 795 796 static void qat_comp_create_req_hdr(struct icp_qat_fw_comn_req_hdr *header, 797 enum qat_comp_request_type request) 798 { 799 if (request == QAT_COMP_REQUEST_FIXED_COMP_STATELESS) 800 header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC; 801 else if (request == QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS) 802 header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC; 803 else if (request == QAT_COMP_REQUEST_DECOMPRESS) 804 header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS; 805 806 header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP; 807 header->hdr_flags = 808 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET); 809 810 header->comn_req_flags = ICP_QAT_FW_COMN_FLAGS_BUILD( 811 QAT_COMN_CD_FLD_TYPE_16BYTE_DATA, QAT_COMN_PTR_TYPE_FLAT); 812 } 813 814 static int qat_comp_create_templates(struct qat_comp_xform *qat_xform, 815 const struct rte_memzone *interm_buff_mz, 816 const struct rte_comp_xform *xform, 817 const struct qat_comp_stream *stream, 818 enum rte_comp_op_type op_type) 819 { 820 struct icp_qat_fw_comp_req *comp_req; 821 int comp_level, algo; 822 uint32_t req_par_flags; 823 int direction = ICP_QAT_HW_COMPRESSION_DIR_COMPRESS; 824 825 if (unlikely(qat_xform == NULL)) { 826 QAT_LOG(ERR, "Session was not created for this device"); 827 return -EINVAL; 828 } 829 830 if (op_type == RTE_COMP_OP_STATEFUL) { 831 if (unlikely(stream == NULL)) { 832 QAT_LOG(ERR, "Stream must be non null for stateful op"); 833 return -EINVAL; 834 } 835 if (unlikely(qat_xform->qat_comp_request_type != 836 QAT_COMP_REQUEST_DECOMPRESS)) { 837 QAT_LOG(ERR, "QAT PMD does not support stateful compression"); 838 return -ENOTSUP; 839 } 840 } 841 842 if (qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) { 843 direction = ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS; 844 comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_1; 845 req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD( 846 ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP, 847 ICP_QAT_FW_COMP_BFINAL, 848 ICP_QAT_FW_COMP_CNV, 849 ICP_QAT_FW_COMP_CNV_RECOVERY); 850 } else { 851 if (xform->compress.level == RTE_COMP_LEVEL_PMD_DEFAULT) 852 comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_8; 853 else if (xform->compress.level == 1) 854 comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_1; 855 else if (xform->compress.level == 2) 856 comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_4; 857 else if (xform->compress.level == 3) 858 comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_8; 859 else if (xform->compress.level >= 4 && 860 xform->compress.level <= 9) 861 comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_16; 862 else { 863 QAT_LOG(ERR, "compression level not supported"); 864 return -EINVAL; 865 } 866 req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD( 867 ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP, 868 ICP_QAT_FW_COMP_BFINAL, ICP_QAT_FW_COMP_CNV, 869 ICP_QAT_FW_COMP_CNV_RECOVERY); 870 } 871 872 switch (xform->compress.algo) { 873 case RTE_COMP_ALGO_DEFLATE: 874 algo = ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE; 875 break; 876 case RTE_COMP_ALGO_LZS: 877 default: 878 /* RTE_COMP_NULL */ 879 QAT_LOG(ERR, "compression algorithm not supported"); 880 return -EINVAL; 881 } 882 883 comp_req = &qat_xform->qat_comp_req_tmpl; 884 885 /* Initialize header */ 886 qat_comp_create_req_hdr(&comp_req->comn_hdr, 887 qat_xform->qat_comp_request_type); 888 889 if (op_type == RTE_COMP_OP_STATEFUL) { 890 comp_req->comn_hdr.serv_specif_flags = 891 ICP_QAT_FW_COMP_FLAGS_BUILD( 892 ICP_QAT_FW_COMP_STATEFUL_SESSION, 893 ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST, 894 ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST, 895 ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST, 896 ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF); 897 898 /* Decompression state registers */ 899 comp_req->comp_cd_ctrl.comp_state_addr = 900 stream->state_registers_decomp_phys; 901 902 /* Enable A, B, C, D, and E (CAMs). */ 903 comp_req->comp_cd_ctrl.ram_bank_flags = 904 ICP_QAT_FW_COMP_RAM_FLAGS_BUILD( 905 ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank I */ 906 ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank H */ 907 ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank G */ 908 ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank F */ 909 ICP_QAT_FW_COMP_BANK_ENABLED, /* Bank E */ 910 ICP_QAT_FW_COMP_BANK_ENABLED, /* Bank D */ 911 ICP_QAT_FW_COMP_BANK_ENABLED, /* Bank C */ 912 ICP_QAT_FW_COMP_BANK_ENABLED, /* Bank B */ 913 ICP_QAT_FW_COMP_BANK_ENABLED); /* Bank A */ 914 915 comp_req->comp_cd_ctrl.ram_banks_addr = 916 stream->inflate_context_phys; 917 } else { 918 comp_req->comn_hdr.serv_specif_flags = 919 ICP_QAT_FW_COMP_FLAGS_BUILD( 920 ICP_QAT_FW_COMP_STATELESS_SESSION, 921 ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST, 922 ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST, 923 ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST, 924 ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF); 925 } 926 927 comp_req->cd_pars.sl.comp_slice_cfg_word[0] = 928 ICP_QAT_HW_COMPRESSION_CONFIG_BUILD( 929 direction, 930 /* In CPM 1.6 only valid mode ! */ 931 ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED, algo, 932 /* Translate level to depth */ 933 comp_level, ICP_QAT_HW_COMPRESSION_FILE_TYPE_0); 934 935 comp_req->comp_pars.initial_adler = 1; 936 comp_req->comp_pars.initial_crc32 = 0; 937 comp_req->comp_pars.req_par_flags = req_par_flags; 938 939 940 if (qat_xform->qat_comp_request_type == 941 QAT_COMP_REQUEST_FIXED_COMP_STATELESS || 942 qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) { 943 ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl, 944 ICP_QAT_FW_SLICE_DRAM_WR); 945 ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->comp_cd_ctrl, 946 ICP_QAT_FW_SLICE_COMP); 947 } else if (qat_xform->qat_comp_request_type == 948 QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS) { 949 950 ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl, 951 ICP_QAT_FW_SLICE_XLAT); 952 ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->comp_cd_ctrl, 953 ICP_QAT_FW_SLICE_COMP); 954 955 ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->u2.xlt_cd_ctrl, 956 ICP_QAT_FW_SLICE_DRAM_WR); 957 ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->u2.xlt_cd_ctrl, 958 ICP_QAT_FW_SLICE_XLAT); 959 960 comp_req->u1.xlt_pars.inter_buff_ptr = 961 interm_buff_mz->iova; 962 } 963 964 #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG 965 QAT_DP_HEXDUMP_LOG(DEBUG, "qat compression message template:", comp_req, 966 sizeof(struct icp_qat_fw_comp_req)); 967 #endif 968 return 0; 969 } 970 971 /** 972 * Create driver private_xform data. 973 * 974 * @param dev 975 * Compressdev device 976 * @param xform 977 * xform data from application 978 * @param private_xform 979 * ptr where handle of pmd's private_xform data should be stored 980 * @return 981 * - if successful returns 0 982 * and valid private_xform handle 983 * - <0 in error cases 984 * - Returns -EINVAL if input parameters are invalid. 985 * - Returns -ENOTSUP if comp device does not support the comp transform. 986 * - Returns -ENOMEM if the private_xform could not be allocated. 987 */ 988 int 989 qat_comp_private_xform_create(struct rte_compressdev *dev, 990 const struct rte_comp_xform *xform, 991 void **private_xform) 992 { 993 struct qat_comp_dev_private *qat = dev->data->dev_private; 994 995 if (unlikely(private_xform == NULL)) { 996 QAT_LOG(ERR, "QAT: private_xform parameter is NULL"); 997 return -EINVAL; 998 } 999 if (unlikely(qat->xformpool == NULL)) { 1000 QAT_LOG(ERR, "QAT device has no private_xform mempool"); 1001 return -ENOMEM; 1002 } 1003 if (rte_mempool_get(qat->xformpool, private_xform)) { 1004 QAT_LOG(ERR, "Couldn't get object from qat xform mempool"); 1005 return -ENOMEM; 1006 } 1007 1008 struct qat_comp_xform *qat_xform = 1009 (struct qat_comp_xform *)*private_xform; 1010 1011 if (xform->type == RTE_COMP_COMPRESS) { 1012 1013 if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED || 1014 ((xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_DEFAULT) 1015 && qat->interm_buff_mz == NULL)) 1016 qat_xform->qat_comp_request_type = 1017 QAT_COMP_REQUEST_FIXED_COMP_STATELESS; 1018 1019 else if ((xform->compress.deflate.huffman == 1020 RTE_COMP_HUFFMAN_DYNAMIC || 1021 xform->compress.deflate.huffman == 1022 RTE_COMP_HUFFMAN_DEFAULT) && 1023 qat->interm_buff_mz != NULL) 1024 1025 qat_xform->qat_comp_request_type = 1026 QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS; 1027 1028 else { 1029 QAT_LOG(ERR, 1030 "IM buffers needed for dynamic deflate. Set size in config file"); 1031 return -EINVAL; 1032 } 1033 1034 qat_xform->checksum_type = xform->compress.chksum; 1035 1036 } else { 1037 qat_xform->qat_comp_request_type = QAT_COMP_REQUEST_DECOMPRESS; 1038 qat_xform->checksum_type = xform->decompress.chksum; 1039 } 1040 1041 if (qat_comp_create_templates(qat_xform, qat->interm_buff_mz, xform, 1042 NULL, RTE_COMP_OP_STATELESS)) { 1043 QAT_LOG(ERR, "QAT: Problem with setting compression"); 1044 return -EINVAL; 1045 } 1046 return 0; 1047 } 1048 1049 /** 1050 * Free driver private_xform data. 1051 * 1052 * @param dev 1053 * Compressdev device 1054 * @param private_xform 1055 * handle of pmd's private_xform data 1056 * @return 1057 * - 0 if successful 1058 * - <0 in error cases 1059 * - Returns -EINVAL if input parameters are invalid. 1060 */ 1061 int 1062 qat_comp_private_xform_free(struct rte_compressdev *dev __rte_unused, 1063 void *private_xform) 1064 { 1065 struct qat_comp_xform *qat_xform = 1066 (struct qat_comp_xform *)private_xform; 1067 1068 if (qat_xform) { 1069 memset(qat_xform, 0, qat_comp_xform_size()); 1070 struct rte_mempool *mp = rte_mempool_from_obj(qat_xform); 1071 1072 rte_mempool_put(mp, qat_xform); 1073 return 0; 1074 } 1075 return -EINVAL; 1076 } 1077 1078 /** 1079 * Reset stream state for the next use. 1080 * 1081 * @param stream 1082 * handle of pmd's private stream data 1083 */ 1084 static void 1085 qat_comp_stream_reset(struct qat_comp_stream *stream) 1086 { 1087 if (stream) { 1088 memset(&stream->qat_xform, 0, sizeof(struct qat_comp_xform)); 1089 stream->start_of_packet = 1; 1090 stream->op_in_progress = 0; 1091 } 1092 } 1093 1094 /** 1095 * Create driver private stream data. 1096 * 1097 * @param dev 1098 * Compressdev device 1099 * @param xform 1100 * xform data 1101 * @param stream 1102 * ptr where handle of pmd's private stream data should be stored 1103 * @return 1104 * - Returns 0 if private stream structure has been created successfully. 1105 * - Returns -EINVAL if input parameters are invalid. 1106 * - Returns -ENOTSUP if comp device does not support STATEFUL operations. 1107 * - Returns -ENOTSUP if comp device does not support the comp transform. 1108 * - Returns -ENOMEM if the private stream could not be allocated. 1109 */ 1110 int 1111 qat_comp_stream_create(struct rte_compressdev *dev, 1112 const struct rte_comp_xform *xform, 1113 void **stream) 1114 { 1115 struct qat_comp_dev_private *qat = dev->data->dev_private; 1116 struct qat_comp_stream *ptr; 1117 1118 if (unlikely(stream == NULL)) { 1119 QAT_LOG(ERR, "QAT: stream parameter is NULL"); 1120 return -EINVAL; 1121 } 1122 if (unlikely(xform->type == RTE_COMP_COMPRESS)) { 1123 QAT_LOG(ERR, "QAT: stateful compression not supported"); 1124 return -ENOTSUP; 1125 } 1126 if (unlikely(qat->streampool == NULL)) { 1127 QAT_LOG(ERR, "QAT device has no stream mempool"); 1128 return -ENOMEM; 1129 } 1130 if (rte_mempool_get(qat->streampool, stream)) { 1131 QAT_LOG(ERR, "Couldn't get object from qat stream mempool"); 1132 return -ENOMEM; 1133 } 1134 1135 ptr = (struct qat_comp_stream *) *stream; 1136 qat_comp_stream_reset(ptr); 1137 ptr->qat_xform.qat_comp_request_type = QAT_COMP_REQUEST_DECOMPRESS; 1138 ptr->qat_xform.checksum_type = xform->decompress.chksum; 1139 1140 if (qat_comp_create_templates(&ptr->qat_xform, qat->interm_buff_mz, 1141 xform, ptr, RTE_COMP_OP_STATEFUL)) { 1142 QAT_LOG(ERR, "QAT: problem with creating descriptor template for stream"); 1143 rte_mempool_put(qat->streampool, *stream); 1144 *stream = NULL; 1145 return -EINVAL; 1146 } 1147 1148 return 0; 1149 } 1150 1151 /** 1152 * Free driver private stream data. 1153 * 1154 * @param dev 1155 * Compressdev device 1156 * @param stream 1157 * handle of pmd's private stream data 1158 * @return 1159 * - 0 if successful 1160 * - <0 in error cases 1161 * - Returns -EINVAL if input parameters are invalid. 1162 * - Returns -ENOTSUP if comp device does not support STATEFUL operations. 1163 * - Returns -EBUSY if can't free stream as there are inflight operations 1164 */ 1165 int 1166 qat_comp_stream_free(struct rte_compressdev *dev, void *stream) 1167 { 1168 if (stream) { 1169 struct qat_comp_dev_private *qat = dev->data->dev_private; 1170 qat_comp_stream_reset((struct qat_comp_stream *) stream); 1171 rte_mempool_put(qat->streampool, stream); 1172 return 0; 1173 } 1174 return -EINVAL; 1175 } 1176