/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 Intel Corporation
 */

#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_hexdump.h>
#include <rte_comp.h>
#include <rte_bus_pci.h>
#include <rte_byteorder.h>
#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_spinlock.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memzone.h>

#include "qat_logs.h"
#include "qat_comp.h"
#include "qat_comp_pmd.h"

static void
qat_comp_fallback_to_fixed(struct icp_qat_fw_comp_req *comp_req)
{
	QAT_DP_LOG(DEBUG, "QAT PMD: fallback to fixed compression!");

	comp_req->comn_hdr.service_cmd_id =
			ICP_QAT_FW_COMP_CMD_STATIC;

	ICP_QAT_FW_COMN_NEXT_ID_SET(
			&comp_req->comp_cd_ctrl,
			ICP_QAT_FW_SLICE_DRAM_WR);

	ICP_QAT_FW_COMN_NEXT_ID_SET(
			&comp_req->u2.xlt_cd_ctrl,
			ICP_QAT_FW_SLICE_NULL);
	ICP_QAT_FW_COMN_CURR_ID_SET(
			&comp_req->u2.xlt_cd_ctrl,
			ICP_QAT_FW_SLICE_NULL);
}

void
qat_comp_free_split_op_memzones(struct qat_comp_op_cookie *cookie,
				unsigned int nb_children)
{
	unsigned int i;

	/* free all memzones allocated for child descriptors */
	for (i = 0; i < nb_children; i++)
		rte_memzone_free(cookie->dst_memzones[i]);

	/* and free the pointer table */
	rte_free(cookie->dst_memzones);
	cookie->dst_memzones = NULL;
}

static int
qat_comp_allocate_split_op_memzones(struct qat_comp_op_cookie *cookie,
				    unsigned int nb_descriptors_needed)
{
	struct qat_queue *txq = &(cookie->qp->tx_q);
	char dst_memz_name[RTE_MEMZONE_NAMESIZE];
	unsigned int i;

	/* allocate the array of memzone pointers */
	cookie->dst_memzones = rte_zmalloc_socket("qat PMD im buf mz pointers",
			(nb_descriptors_needed - 1) *
				sizeof(const struct rte_memzone *),
			RTE_CACHE_LINE_SIZE, cookie->socket_id);

	if (cookie->dst_memzones == NULL) {
		QAT_DP_LOG(ERR,
			"QAT PMD: failed to allocate im buf mz pointers");
		return -ENOMEM;
	}

	for (i = 0; i < nb_descriptors_needed - 1; i++) {
		snprintf(dst_memz_name,
				sizeof(dst_memz_name),
				"dst_%u_%u_%u_%u_%u",
				cookie->qp->qat_dev->qat_dev_id,
				txq->hw_bundle_number, txq->hw_queue_number,
				cookie->cookie_index, i);

		cookie->dst_memzones[i] = rte_memzone_reserve_aligned(
				dst_memz_name, RTE_PMD_QAT_COMP_IM_BUFFER_SIZE,
				cookie->socket_id, RTE_MEMZONE_IOVA_CONTIG,
				RTE_CACHE_LINE_SIZE);

		if (cookie->dst_memzones[i] == NULL) {
			QAT_DP_LOG(ERR,
				"QAT PMD: failed to allocate dst buffer memzone");

			/* let's free all memzones allocated up to now */
			qat_comp_free_split_op_memzones(cookie, i);

			return -ENOMEM;
		}
	}

	return 0;
}

int
qat_comp_build_request(void *in_op, uint8_t *out_msg,
		       void *op_cookie,
		       enum qat_device_gen qat_dev_gen __rte_unused)
{
	struct rte_comp_op *op = in_op;
	struct qat_comp_op_cookie *cookie =
			(struct qat_comp_op_cookie *)op_cookie;
	struct qat_comp_stream *stream;
	struct qat_comp_xform *qat_xform;
	const uint8_t *tmpl;
	struct icp_qat_fw_comp_req *comp_req =
			(struct icp_qat_fw_comp_req *)out_msg;

	if (op->op_type == RTE_COMP_OP_STATEFUL) {
		stream = op->stream;
		qat_xform = &stream->qat_xform;
		if (unlikely(qat_xform->qat_comp_request_type !=
			     QAT_COMP_REQUEST_DECOMPRESS)) {
			QAT_DP_LOG(ERR, "QAT PMD does not support stateful compression");
			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
			return -EINVAL;
		}
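		/* only one stateful operation may be in flight per stream,
		 * so reject an op while another is still in progress
		 */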
		if (unlikely(stream->op_in_progress)) {
			QAT_DP_LOG(ERR, "QAT PMD does not support running multiple stateful operations on the same stream at once");
			op->status = RTE_COMP_OP_STATUS_INVALID_STATE;
			return -EINVAL;
		}
		stream->op_in_progress = 1;
	} else {
		stream = NULL;
		qat_xform = op->private_xform;
	}
	tmpl = (uint8_t *)&qat_xform->qat_comp_req_tmpl;

	rte_mov128(out_msg, tmpl);
	comp_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;

	if (likely(qat_xform->qat_comp_request_type ==
		   QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)) {

		if (unlikely(op->src.length > QAT_FALLBACK_THLD)) {
			/* the operation must be split into pieces */
			if (qat_xform->checksum_type !=
					RTE_COMP_CHECKSUM_NONE) {
				/* fallback to fixed compression in case any
				 * checksum calculation was requested
				 */
				qat_comp_fallback_to_fixed(comp_req);
			} else {
				/* calculate num. of descriptors for split op */
				unsigned int nb_descriptors_needed =
					op->src.length / QAT_FALLBACK_THLD + 1;
				/* allocate memzone for output data */
				if (qat_comp_allocate_split_op_memzones(
						cookie,
						nb_descriptors_needed)) {
					/* out of memory, fallback to fixed */
					qat_comp_fallback_to_fixed(comp_req);
				} else {
					QAT_DP_LOG(DEBUG,
							"Input data is too big, op must be split into %u descriptors",
							nb_descriptors_needed);
					return (int) nb_descriptors_needed;
				}
			}
		}

		/* set BFINAL bit according to flush_flag */
		comp_req->comp_pars.req_par_flags =
			ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
				ICP_QAT_FW_COMP_SOP,
				ICP_QAT_FW_COMP_EOP,
				op->flush_flag == RTE_COMP_FLUSH_FINAL ?
					ICP_QAT_FW_COMP_BFINAL
					: ICP_QAT_FW_COMP_NOT_BFINAL,
				ICP_QAT_FW_COMP_CNV,
				ICP_QAT_FW_COMP_CNV_RECOVERY);

	} else if (op->op_type == RTE_COMP_OP_STATEFUL) {

		comp_req->comp_pars.req_par_flags =
			ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
				(stream->start_of_packet) ?
					ICP_QAT_FW_COMP_SOP
					: ICP_QAT_FW_COMP_NOT_SOP,
				(op->flush_flag == RTE_COMP_FLUSH_FULL ||
				 op->flush_flag == RTE_COMP_FLUSH_FINAL) ?
					ICP_QAT_FW_COMP_EOP
					: ICP_QAT_FW_COMP_NOT_EOP,
				ICP_QAT_FW_COMP_NOT_BFINAL,
				ICP_QAT_FW_COMP_CNV,
				ICP_QAT_FW_COMP_CNV_RECOVERY);
	}

	/* common for sgl and flat buffers */
	comp_req->comp_pars.comp_len = op->src.length;
	comp_req->comp_pars.out_buffer_sz = rte_pktmbuf_pkt_len(op->m_dst) -
			op->dst.offset;

	if (op->m_src->next != NULL || op->m_dst->next != NULL) {
		/* sgl */
		int ret = 0;

		ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
				QAT_COMN_PTR_TYPE_SGL);

		if (unlikely(op->m_src->nb_segs > cookie->src_nb_elems)) {
			/* we need to allocate more elements in SGL*/
			void *tmp;

			tmp = rte_realloc_socket(cookie->qat_sgl_src_d,
					sizeof(struct qat_sgl) +
					sizeof(struct qat_flat_buf) *
					op->m_src->nb_segs, 64,
					cookie->socket_id);

			if (unlikely(tmp == NULL)) {
				QAT_DP_LOG(ERR, "QAT PMD can't allocate memory"
					   " for %d elements of SGL",
					   op->m_src->nb_segs);
				op->status = RTE_COMP_OP_STATUS_ERROR;
				/* clear op-in-progress flag */
				if (stream)
					stream->op_in_progress = 0;
				return -ENOMEM;
			}
			/* new SGL is valid now */
			cookie->qat_sgl_src_d = (struct qat_sgl *)tmp;
			cookie->src_nb_elems = op->m_src->nb_segs;
			cookie->qat_sgl_src_phys_addr =
				rte_malloc_virt2iova(cookie->qat_sgl_src_d);
		}

		ret = qat_sgl_fill_array(op->m_src,
				op->src.offset,
				cookie->qat_sgl_src_d,
				op->src.length,
				cookie->src_nb_elems);
		if (ret) {
			QAT_DP_LOG(ERR, "QAT PMD Cannot fill source sgl array");
			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
			/* clear op-in-progress flag */
			if (stream)
				stream->op_in_progress = 0;
			return ret;
		}

		if (unlikely(op->m_dst->nb_segs > cookie->dst_nb_elems)) {
			/* we need to allocate more elements in SGL*/
			struct qat_sgl *tmp;

			tmp = rte_realloc_socket(cookie->qat_sgl_dst_d,
					sizeof(struct qat_sgl) +
					sizeof(struct qat_flat_buf) *
					op->m_dst->nb_segs, 64,
					cookie->socket_id);

			if (unlikely(tmp == NULL)) {
				QAT_DP_LOG(ERR, "QAT PMD can't allocate memory"
					   " for %d elements of SGL",
					   op->m_dst->nb_segs);
				op->status = RTE_COMP_OP_STATUS_ERROR;
				/* clear op-in-progress flag */
				if (stream)
					stream->op_in_progress = 0;
				return -ENOMEM;
			}
			/* new SGL is valid now */
			cookie->qat_sgl_dst_d = (struct qat_sgl *)tmp;
			cookie->dst_nb_elems = op->m_dst->nb_segs;
			cookie->qat_sgl_dst_phys_addr =
				rte_malloc_virt2iova(cookie->qat_sgl_dst_d);
		}

		ret = qat_sgl_fill_array(op->m_dst,
				op->dst.offset,
				cookie->qat_sgl_dst_d,
				comp_req->comp_pars.out_buffer_sz,
				cookie->dst_nb_elems);
		if (ret) {
sgl array"); 285 op->status = RTE_COMP_OP_STATUS_INVALID_ARGS; 286 /* clear op-in-progress flag */ 287 if (stream) 288 stream->op_in_progress = 0; 289 return ret; 290 } 291 292 comp_req->comn_mid.src_data_addr = 293 cookie->qat_sgl_src_phys_addr; 294 comp_req->comn_mid.dest_data_addr = 295 cookie->qat_sgl_dst_phys_addr; 296 comp_req->comn_mid.src_length = 0; 297 comp_req->comn_mid.dst_length = 0; 298 299 } else { 300 /* flat aka linear buffer */ 301 ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags, 302 QAT_COMN_PTR_TYPE_FLAT); 303 comp_req->comn_mid.src_length = op->src.length; 304 comp_req->comn_mid.dst_length = 305 comp_req->comp_pars.out_buffer_sz; 306 307 comp_req->comn_mid.src_data_addr = 308 rte_pktmbuf_iova_offset(op->m_src, op->src.offset); 309 comp_req->comn_mid.dest_data_addr = 310 rte_pktmbuf_iova_offset(op->m_dst, op->dst.offset); 311 } 312 313 if (unlikely(rte_pktmbuf_pkt_len(op->m_dst) < QAT_MIN_OUT_BUF_SIZE)) { 314 /* QAT doesn't support dest. buffer lower 315 * than QAT_MIN_OUT_BUF_SIZE. Propagate error mark 316 * by converting this request to the null one 317 * and check the status in the response. 318 */ 319 QAT_DP_LOG(WARNING, "QAT destination buffer too small - resend with larger buffer"); 320 comp_req->comn_hdr.service_type = ICP_QAT_FW_COMN_REQ_NULL; 321 comp_req->comn_hdr.service_cmd_id = ICP_QAT_FW_NULL_REQ_SERV_ID; 322 cookie->error = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED; 323 } 324 325 #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG 326 QAT_DP_LOG(DEBUG, "Direction: %s", 327 qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ? 328 "decompression" : "compression"); 329 QAT_DP_HEXDUMP_LOG(DEBUG, "qat compression message:", comp_req, 330 sizeof(struct icp_qat_fw_comp_req)); 331 #endif 332 return 0; 333 } 334 335 static inline uint32_t 336 adf_modulo(uint32_t data, uint32_t modulo_mask) 337 { 338 return data & modulo_mask; 339 } 340 341 static inline void 342 qat_comp_mbuf_skip(struct rte_mbuf **mbuf, uint32_t *offset, uint32_t len) 343 { 344 while (*offset + len >= rte_pktmbuf_data_len(*mbuf)) { 345 len -= (rte_pktmbuf_data_len(*mbuf) - *offset); 346 *mbuf = (*mbuf)->next; 347 *offset = 0; 348 } 349 *offset = len; 350 } 351 352 int 353 qat_comp_build_multiple_requests(void *in_op, struct qat_qp *qp, 354 uint32_t parent_tail, int nb_descr) 355 { 356 struct rte_comp_op op_backup; 357 struct rte_mbuf dst_mbuf; 358 struct rte_comp_op *op = in_op; 359 struct qat_queue *txq = &(qp->tx_q); 360 uint8_t *base_addr = (uint8_t *)txq->base_addr; 361 uint8_t *out_msg = base_addr + parent_tail; 362 uint32_t tail = parent_tail; 363 struct icp_qat_fw_comp_req *comp_req = 364 (struct icp_qat_fw_comp_req *)out_msg; 365 struct qat_comp_op_cookie *parent_cookie = 366 (struct qat_comp_op_cookie *) 367 qp->op_cookies[parent_tail / txq->msg_size]; 368 struct qat_comp_op_cookie *child_cookie; 369 uint16_t dst_data_size = 370 RTE_MIN(RTE_PMD_QAT_COMP_IM_BUFFER_SIZE, 65535); 371 uint32_t data_to_enqueue = op->src.length - QAT_FALLBACK_THLD; 372 int num_descriptors_built = 1; 373 int ret; 374 375 QAT_DP_LOG(DEBUG, "op %p, parent_cookie %p", op, parent_cookie); 376 377 /* copy original op to the local variable for restoring later */ 378 rte_memcpy(&op_backup, op, sizeof(op_backup)); 379 380 parent_cookie->nb_child_responses = 0; 381 parent_cookie->nb_children = 0; 382 parent_cookie->split_op = 1; 383 parent_cookie->dst_data = op->m_dst; 384 parent_cookie->dst_data_offset = op->dst.offset; 385 386 op->src.length = QAT_FALLBACK_THLD; 387 op->flush_flag = RTE_COMP_FLUSH_FULL; 

	QAT_DP_LOG(DEBUG, "parent op src len %u dst len %u",
			op->src.length, op->m_dst->pkt_len);

	ret = qat_comp_build_request(in_op, out_msg, parent_cookie,
			qp->qat_dev_gen);
	if (ret != 0) {
		/* restore op and clear cookie */
		QAT_DP_LOG(WARNING, "Failed to build parent descriptor");
		op->src.length = op_backup.src.length;
		op->flush_flag = op_backup.flush_flag;
		parent_cookie->split_op = 0;
		return ret;
	}

	/* prepare local dst mbuf */
	rte_memcpy(&dst_mbuf, op->m_dst, sizeof(dst_mbuf));
	rte_pktmbuf_reset(&dst_mbuf);
	dst_mbuf.buf_len = dst_data_size;
	dst_mbuf.data_len = dst_data_size;
	dst_mbuf.pkt_len = dst_data_size;
	dst_mbuf.data_off = 0;

	/* update op for the child operations */
	op->m_dst = &dst_mbuf;
	op->dst.offset = 0;

	while (data_to_enqueue) {
		const struct rte_memzone *mz =
			parent_cookie->dst_memzones[num_descriptors_built - 1];
		uint32_t src_data_size = RTE_MIN(data_to_enqueue,
				QAT_FALLBACK_THLD);
		uint32_t cookie_index;

		/* update params for the next op */
		op->src.offset += QAT_FALLBACK_THLD;
		op->src.length = src_data_size;
		op->flush_flag = (src_data_size == data_to_enqueue) ?
			op_backup.flush_flag : RTE_COMP_FLUSH_FULL;

		/* update dst mbuf for the next op (use memzone for dst data) */
		dst_mbuf.buf_addr = mz->addr;
		dst_mbuf.buf_iova = mz->iova;

		/* move the tail and calculate next cookie index */
		tail = adf_modulo(tail + txq->msg_size, txq->modulo_mask);
		cookie_index = tail / txq->msg_size;
		child_cookie = (struct qat_comp_op_cookie *)
				qp->op_cookies[cookie_index];
		comp_req = (struct icp_qat_fw_comp_req *)(base_addr + tail);

		/* update child cookie */
		child_cookie->split_op = 1; /* must be set for child as well */
		child_cookie->parent_cookie = parent_cookie; /* same as above */
		child_cookie->nb_children = 0;
		child_cookie->dest_buffer = mz->addr;

		QAT_DP_LOG(DEBUG,
				"cookie_index %u, child_cookie %p, comp_req %p",
				cookie_index, child_cookie, comp_req);
		QAT_DP_LOG(DEBUG,
				"data_to_enqueue %u, num_descriptors_built %d",
				data_to_enqueue, num_descriptors_built);
		QAT_DP_LOG(DEBUG, "child op src len %u dst len %u",
				op->src.length, op->m_dst->pkt_len);

		/* build the request */
		ret = qat_comp_build_request(op, (uint8_t *)comp_req,
				child_cookie, qp->qat_dev_gen);
		if (ret < 0) {
			QAT_DP_LOG(WARNING, "Failed to build child descriptor");
			/* restore op and clear cookie */
			rte_memcpy(op, &op_backup, sizeof(op_backup));
			parent_cookie->split_op = 0;
			parent_cookie->nb_children = 0;
			return ret;
		}

		data_to_enqueue -= src_data_size;
		num_descriptors_built++;
	}

	/* restore backed up original op */
	rte_memcpy(op, &op_backup, sizeof(op_backup));

	if (nb_descr != num_descriptors_built)
		QAT_DP_LOG(ERR, "split op. expected %d, built %d",
				nb_descr, num_descriptors_built);

	parent_cookie->nb_children = num_descriptors_built - 1;
	return num_descriptors_built;
}

static inline void
qat_comp_response_data_copy(struct qat_comp_op_cookie *cookie,
		       struct rte_comp_op *rx_op)
{
	struct qat_comp_op_cookie *pc = cookie->parent_cookie;
	struct rte_mbuf *sgl_buf = pc->dst_data;
	void *op_dst_addr = rte_pktmbuf_mtod_offset(sgl_buf, uint8_t *,
						    pc->dst_data_offset);

	/* number of bytes left in the current segment */
	uint32_t left_in_current = rte_pktmbuf_data_len(sgl_buf) -
			pc->dst_data_offset;

	uint32_t prod, sent;

	if (rx_op->produced <= left_in_current) {
		rte_memcpy(op_dst_addr, cookie->dest_buffer,
				rx_op->produced);
		/* calculate dst mbuf and offset for the next child op */
		if (rx_op->produced == left_in_current) {
			pc->dst_data = sgl_buf->next;
			pc->dst_data_offset = 0;
		} else
			pc->dst_data_offset += rx_op->produced;
	} else {
		rte_memcpy(op_dst_addr, cookie->dest_buffer,
				left_in_current);
		sgl_buf = sgl_buf->next;
		prod = rx_op->produced - left_in_current;
		sent = left_in_current;
		while (prod > rte_pktmbuf_data_len(sgl_buf)) {
			op_dst_addr = rte_pktmbuf_mtod_offset(sgl_buf,
					uint8_t *, 0);

			rte_memcpy(op_dst_addr,
					((uint8_t *)cookie->dest_buffer) +
					sent,
					rte_pktmbuf_data_len(sgl_buf));

			prod -= rte_pktmbuf_data_len(sgl_buf);
			sent += rte_pktmbuf_data_len(sgl_buf);

			sgl_buf = sgl_buf->next;
		}

		op_dst_addr = rte_pktmbuf_mtod_offset(sgl_buf, uint8_t *, 0);

		rte_memcpy(op_dst_addr,
				((uint8_t *)cookie->dest_buffer) + sent,
				prod);

		/* calculate dst mbuf and offset for the next child op */
		if (prod == rte_pktmbuf_data_len(sgl_buf)) {
			pc->dst_data = sgl_buf->next;
			pc->dst_data_offset = 0;
		} else {
			pc->dst_data = sgl_buf;
			pc->dst_data_offset = prod;
		}
	}
}

int
qat_comp_process_response(void **op, uint8_t *resp, void *op_cookie,
			  uint64_t *dequeue_err_count)
{
	struct icp_qat_fw_comp_resp *resp_msg =
			(struct icp_qat_fw_comp_resp *)resp;
	struct qat_comp_op_cookie *cookie =
			(struct qat_comp_op_cookie *)op_cookie;

	struct icp_qat_fw_resp_comp_pars *comp_resp1 =
		(struct icp_qat_fw_resp_comp_pars *)&resp_msg->comp_resp_pars;

	QAT_DP_LOG(DEBUG, "input counter = %u, output counter = %u",
		   comp_resp1->input_byte_counter,
		   comp_resp1->output_byte_counter);

	struct rte_comp_op *rx_op = (struct rte_comp_op *)(uintptr_t)
			(resp_msg->opaque_data);
	struct qat_comp_stream *stream;
	struct qat_comp_xform *qat_xform;
	int err = resp_msg->comn_resp.comn_status &
			((1 << QAT_COMN_RESP_CMP_STATUS_BITPOS) |
			 (1 << QAT_COMN_RESP_XLAT_STATUS_BITPOS));

	if (rx_op->op_type == RTE_COMP_OP_STATEFUL) {
		stream = rx_op->stream;
		qat_xform = &stream->qat_xform;
		/* clear op-in-progress flag */
		stream->op_in_progress = 0;
	} else {
		stream = NULL;
		qat_xform = rx_op->private_xform;
	}
580 "decompression" : "compression"); 581 QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg, 582 sizeof(struct icp_qat_fw_comp_resp)); 583 #endif 584 585 if (unlikely(cookie->error)) { 586 rx_op->status = cookie->error; 587 cookie->error = 0; 588 ++(*dequeue_err_count); 589 rx_op->debug_status = 0; 590 rx_op->consumed = 0; 591 rx_op->produced = 0; 592 *op = (void *)rx_op; 593 /* also in this case number of returned ops */ 594 /* must be equal to one, */ 595 /* appropriate status (error) must be set as well */ 596 return 1; 597 } 598 599 if (likely(qat_xform->qat_comp_request_type 600 != QAT_COMP_REQUEST_DECOMPRESS)) { 601 if (unlikely(ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET( 602 resp_msg->comn_resp.hdr_flags) 603 == ICP_QAT_FW_COMP_NO_CNV)) { 604 rx_op->status = RTE_COMP_OP_STATUS_ERROR; 605 rx_op->debug_status = ERR_CODE_QAT_COMP_WRONG_FW; 606 *op = (void *)rx_op; 607 QAT_DP_LOG(ERR, 608 "This QAT hardware doesn't support compression operation"); 609 ++(*dequeue_err_count); 610 return 1; 611 } 612 } 613 614 if (err) { 615 if (unlikely((err & (1 << QAT_COMN_RESP_XLAT_STATUS_BITPOS)) 616 && (qat_xform->qat_comp_request_type 617 == QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS))) { 618 QAT_DP_LOG(ERR, "QAT intermediate buffer may be too " 619 "small for output, try configuring a larger size"); 620 } 621 622 int8_t cmp_err_code = 623 (int8_t)resp_msg->comn_resp.comn_error.cmp_err_code; 624 int8_t xlat_err_code = 625 (int8_t)resp_msg->comn_resp.comn_error.xlat_err_code; 626 627 /* handle recoverable out-of-buffer condition in stateful 628 * decompression scenario 629 */ 630 if (cmp_err_code == ERR_CODE_OVERFLOW_ERROR && !xlat_err_code 631 && qat_xform->qat_comp_request_type 632 == QAT_COMP_REQUEST_DECOMPRESS 633 && rx_op->op_type == RTE_COMP_OP_STATEFUL) { 634 struct icp_qat_fw_resp_comp_pars *comp_resp = 635 &resp_msg->comp_resp_pars; 636 rx_op->status = 637 RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE; 638 rx_op->consumed = comp_resp->input_byte_counter; 639 rx_op->produced = comp_resp->output_byte_counter; 640 stream->start_of_packet = 0; 641 } else if ((cmp_err_code == ERR_CODE_OVERFLOW_ERROR 642 && !xlat_err_code) 643 || 644 (!cmp_err_code && xlat_err_code == ERR_CODE_OVERFLOW_ERROR) 645 || 646 (cmp_err_code == ERR_CODE_OVERFLOW_ERROR && 647 xlat_err_code == ERR_CODE_OVERFLOW_ERROR)){ 648 649 struct icp_qat_fw_resp_comp_pars *comp_resp = 650 (struct icp_qat_fw_resp_comp_pars *) 651 &resp_msg->comp_resp_pars; 652 653 /* handle recoverable out-of-buffer condition 654 * in stateless compression scenario 655 */ 656 if (comp_resp->input_byte_counter) { 657 if ((qat_xform->qat_comp_request_type 658 == QAT_COMP_REQUEST_FIXED_COMP_STATELESS) || 659 (qat_xform->qat_comp_request_type 660 == QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)) { 661 662 rx_op->status = 663 RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE; 664 rx_op->consumed = 665 comp_resp->input_byte_counter; 666 rx_op->produced = 667 comp_resp->output_byte_counter; 668 } else 669 rx_op->status = 670 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED; 671 } else 672 rx_op->status = 673 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED; 674 } else 675 rx_op->status = RTE_COMP_OP_STATUS_ERROR; 676 677 ++(*dequeue_err_count); 678 rx_op->debug_status = 679 *((uint16_t *)(&resp_msg->comn_resp.comn_error)); 680 } else { 681 struct icp_qat_fw_resp_comp_pars *comp_resp = 682 (struct icp_qat_fw_resp_comp_pars *)&resp_msg->comp_resp_pars; 683 684 rx_op->status = RTE_COMP_OP_STATUS_SUCCESS; 685 rx_op->consumed = comp_resp->input_byte_counter; 686 rx_op->produced = 
		rx_op->produced = comp_resp->output_byte_counter;
		if (stream)
			stream->start_of_packet = 0;

		if (qat_xform->checksum_type != RTE_COMP_CHECKSUM_NONE) {
			if (qat_xform->checksum_type == RTE_COMP_CHECKSUM_CRC32)
				rx_op->output_chksum = comp_resp->curr_crc32;
			else if (qat_xform->checksum_type ==
					RTE_COMP_CHECKSUM_ADLER32)
				rx_op->output_chksum = comp_resp->curr_adler_32;
			else
				rx_op->output_chksum = comp_resp->curr_chksum;
		}
	}
	QAT_DP_LOG(DEBUG, "About to check for split op :cookies: %p %p, split:%u",
		   cookie, cookie->parent_cookie, cookie->split_op);

	if (cookie->split_op) {
		*op = NULL;
		struct qat_comp_op_cookie *pc = cookie->parent_cookie;

		if (cookie->nb_children > 0) {
			QAT_DP_LOG(DEBUG, "Parent");
			/* parent - don't return until all children
			 * responses are collected
			 */
			cookie->total_consumed = rx_op->consumed;
			cookie->total_produced = rx_op->produced;
			if (err) {
				cookie->error = rx_op->status;
				rx_op->status = RTE_COMP_OP_STATUS_SUCCESS;
			} else {
				/* calculate dst mbuf and offset for child op */
				qat_comp_mbuf_skip(&cookie->dst_data,
						&cookie->dst_data_offset,
						rx_op->produced);
			}
		} else {
			QAT_DP_LOG(DEBUG, "Child");
			if (pc->error == RTE_COMP_OP_STATUS_SUCCESS) {
				if (err)
					pc->error = rx_op->status;
				if (rx_op->produced) {
					/* this covers both SUCCESS and
					 * OUT_OF_SPACE_RECOVERABLE cases
					 */
					qat_comp_response_data_copy(cookie,
							rx_op);
					pc->total_consumed += rx_op->consumed;
					pc->total_produced += rx_op->produced;
				}
			}
			rx_op->status = RTE_COMP_OP_STATUS_SUCCESS;

			pc->nb_child_responses++;

			/* (child) cookie fields have to be reset
			 * to avoid problems with reusability -
			 * rx and tx queue starting from index zero
			 */
			cookie->nb_children = 0;
			cookie->split_op = 0;
			cookie->nb_child_responses = 0;
			cookie->dest_buffer = NULL;

			if (pc->nb_child_responses == pc->nb_children) {
				uint8_t child_resp;

				/* parent should be included as well */
				child_resp = pc->nb_child_responses + 1;

				rx_op->status = pc->error;
				rx_op->consumed = pc->total_consumed;
				rx_op->produced = pc->total_produced;
				*op = (void *)rx_op;

				/* free memzones used for dst data */
				qat_comp_free_split_op_memzones(pc,
						pc->nb_children);

				/* (parent) cookie fields have to be reset
				 * to avoid problems with reusability -
				 * rx and tx queue starting from index zero
				 */
				pc->nb_children = 0;
				pc->split_op = 0;
				pc->nb_child_responses = 0;
				pc->error = RTE_COMP_OP_STATUS_SUCCESS;

				return child_resp;
			}
		}
		return 0;
	}

	*op = (void *)rx_op;
	return 1;
}

unsigned int
qat_comp_xform_size(void)
{
	return RTE_ALIGN_CEIL(sizeof(struct qat_comp_xform), 8);
}

unsigned int
qat_comp_stream_size(void)
{
	return RTE_ALIGN_CEIL(sizeof(struct qat_comp_stream), 8);
}

static void
qat_comp_create_req_hdr(struct icp_qat_fw_comn_req_hdr *header,
			enum qat_comp_request_type request)
{
	if (request == QAT_COMP_REQUEST_FIXED_COMP_STATELESS)
		header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
	else if (request == QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)
		header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC;
	else if (request == QAT_COMP_REQUEST_DECOMPRESS)
		header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;

	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);

	header->comn_req_flags = ICP_QAT_FW_COMN_FLAGS_BUILD(
		QAT_COMN_CD_FLD_TYPE_16BYTE_DATA, QAT_COMN_PTR_TYPE_FLAT);
}

static int
qat_comp_create_templates(struct qat_comp_xform *qat_xform,
			  const struct rte_memzone *interm_buff_mz,
			  const struct rte_comp_xform *xform,
			  const struct qat_comp_stream *stream,
			  enum rte_comp_op_type op_type,
			  enum qat_device_gen qat_dev_gen)
{
	struct icp_qat_fw_comp_req *comp_req;
	uint32_t req_par_flags;
	int res;

	if (unlikely(qat_xform == NULL)) {
		QAT_LOG(ERR, "Session was not created for this device");
		return -EINVAL;
	}

	if (op_type == RTE_COMP_OP_STATEFUL) {
		if (unlikely(stream == NULL)) {
			QAT_LOG(ERR, "Stream must be non null for stateful op");
			return -EINVAL;
		}
		if (unlikely(qat_xform->qat_comp_request_type !=
			     QAT_COMP_REQUEST_DECOMPRESS)) {
			QAT_LOG(ERR, "QAT PMD does not support stateful compression");
			return -ENOTSUP;
		}
	}

	if (qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS)
		req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
				ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP,
				ICP_QAT_FW_COMP_BFINAL,
				ICP_QAT_FW_COMP_CNV,
				ICP_QAT_FW_COMP_CNV_RECOVERY);
	else
		req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
				ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP,
				ICP_QAT_FW_COMP_BFINAL, ICP_QAT_FW_COMP_CNV,
				ICP_QAT_FW_COMP_CNV_RECOVERY);

	comp_req = &qat_xform->qat_comp_req_tmpl;

	/* Initialize header */
	qat_comp_create_req_hdr(&comp_req->comn_hdr,
					qat_xform->qat_comp_request_type);

	if (op_type == RTE_COMP_OP_STATEFUL) {
		comp_req->comn_hdr.serv_specif_flags =
				ICP_QAT_FW_COMP_FLAGS_BUILD(
			ICP_QAT_FW_COMP_STATEFUL_SESSION,
			ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
			ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
			ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
			ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);

		/* Decompression state registers */
		comp_req->comp_cd_ctrl.comp_state_addr =
				stream->state_registers_decomp_phys;

		/* RAM bank flags */
		comp_req->comp_cd_ctrl.ram_bank_flags =
				qat_comp_gen_dev_ops[qat_dev_gen]
					.qat_comp_get_ram_bank_flags();

		comp_req->comp_cd_ctrl.ram_banks_addr =
				stream->inflate_context_phys;
	} else {
		comp_req->comn_hdr.serv_specif_flags =
				ICP_QAT_FW_COMP_FLAGS_BUILD(
			ICP_QAT_FW_COMP_STATELESS_SESSION,
			ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
			ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
			ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
			ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
	}

	res = qat_comp_gen_dev_ops[qat_dev_gen].qat_comp_set_slice_cfg_word(
			qat_xform, xform, op_type,
			comp_req->cd_pars.sl.comp_slice_cfg_word);
	if (res)
		return res;

	comp_req->comp_pars.initial_adler = 1;
	comp_req->comp_pars.initial_crc32 = 0;
	comp_req->comp_pars.req_par_flags = req_par_flags;

	if (qat_xform->qat_comp_request_type ==
			QAT_COMP_REQUEST_FIXED_COMP_STATELESS ||
	    qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) {
		ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl,
					    ICP_QAT_FW_SLICE_DRAM_WR);
		ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->comp_cd_ctrl,
					    ICP_QAT_FW_SLICE_COMP);
	} else if (qat_xform->qat_comp_request_type ==
			QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS) {

		ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl,
				ICP_QAT_FW_SLICE_XLAT);
		ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->comp_cd_ctrl,
				ICP_QAT_FW_SLICE_COMP);

		ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->u2.xlt_cd_ctrl,
				ICP_QAT_FW_SLICE_DRAM_WR);
		ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->u2.xlt_cd_ctrl,
				ICP_QAT_FW_SLICE_XLAT);

		comp_req->u1.xlt_pars.inter_buff_ptr =
				(qat_comp_get_num_im_bufs_required(qat_dev_gen)
					== 0) ? 0 : interm_buff_mz->iova;
	}

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	QAT_DP_HEXDUMP_LOG(DEBUG, "qat compression message template:", comp_req,
		    sizeof(struct icp_qat_fw_comp_req));
#endif
	return 0;
}

/**
 * Create driver private_xform data.
 *
 * @param dev
 *   Compressdev device
 * @param xform
 *   xform data from application
 * @param private_xform
 *   ptr where handle of pmd's private_xform data should be stored
 * @return
 *  - if successful returns 0
 *    and valid private_xform handle
 *  - <0 in error cases
 *  - Returns -EINVAL if input parameters are invalid.
 *  - Returns -ENOTSUP if comp device does not support the comp transform.
 *  - Returns -ENOMEM if the private_xform could not be allocated.
 */
int
qat_comp_private_xform_create(struct rte_compressdev *dev,
			      const struct rte_comp_xform *xform,
			      void **private_xform)
{
	struct qat_comp_dev_private *qat = dev->data->dev_private;
	enum qat_device_gen qat_dev_gen = qat->qat_dev->qat_dev_gen;
	unsigned int im_bufs = qat_comp_get_num_im_bufs_required(qat_dev_gen);

	if (unlikely(private_xform == NULL)) {
		QAT_LOG(ERR, "QAT: private_xform parameter is NULL");
		return -EINVAL;
	}
	if (unlikely(qat->xformpool == NULL)) {
		QAT_LOG(ERR, "QAT device has no private_xform mempool");
		return -ENOMEM;
	}
	if (rte_mempool_get(qat->xformpool, private_xform)) {
		QAT_LOG(ERR, "Couldn't get object from qat xform mempool");
		return -ENOMEM;
	}

	struct qat_comp_xform *qat_xform =
			(struct qat_comp_xform *)*private_xform;

	if (xform->type == RTE_COMP_COMPRESS) {

		if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED ||
		  ((xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_DEFAULT)
				&& qat->interm_buff_mz == NULL
				&& im_bufs > 0))
			qat_xform->qat_comp_request_type =
					QAT_COMP_REQUEST_FIXED_COMP_STATELESS;

		else if ((xform->compress.deflate.huffman ==
				RTE_COMP_HUFFMAN_DYNAMIC ||
				xform->compress.deflate.huffman ==
						RTE_COMP_HUFFMAN_DEFAULT) &&
				(qat->interm_buff_mz != NULL ||
						im_bufs == 0))

			qat_xform->qat_comp_request_type =
					QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS;

		else {
			QAT_LOG(ERR,
					"IM buffers needed for dynamic deflate. Set size in config file");
			return -EINVAL;
		}

		qat_xform->checksum_type = xform->compress.chksum;

	} else {
		qat_xform->qat_comp_request_type = QAT_COMP_REQUEST_DECOMPRESS;
		qat_xform->checksum_type = xform->decompress.chksum;
	}

	if (qat_comp_create_templates(qat_xform, qat->interm_buff_mz, xform,
				      NULL, RTE_COMP_OP_STATELESS,
				      qat_dev_gen)) {
		QAT_LOG(ERR, "QAT: Problem with setting compression");
		return -EINVAL;
	}
	return 0;
}

/**
 * Free driver private_xform data.
 *
 * @param dev
 *   Compressdev device
 * @param private_xform
 *   handle of pmd's private_xform data
 * @return
 *  - 0 if successful
 *  - <0 in error cases
 *  - Returns -EINVAL if input parameters are invalid.
 */
int
qat_comp_private_xform_free(struct rte_compressdev *dev __rte_unused,
			    void *private_xform)
{
	struct qat_comp_xform *qat_xform =
			(struct qat_comp_xform *)private_xform;

	if (qat_xform) {
		memset(qat_xform, 0, qat_comp_xform_size());
		struct rte_mempool *mp = rte_mempool_from_obj(qat_xform);

		rte_mempool_put(mp, qat_xform);
		return 0;
	}
	return -EINVAL;
}

/**
 * Reset stream state for the next use.
 *
 * @param stream
 *   handle of pmd's private stream data
 */
static void
qat_comp_stream_reset(struct qat_comp_stream *stream)
{
	if (stream) {
		memset(&stream->qat_xform, 0, sizeof(struct qat_comp_xform));
		stream->start_of_packet = 1;
		stream->op_in_progress = 0;
	}
}

/**
 * Create driver private stream data.
 *
 * @param dev
 *   Compressdev device
 * @param xform
 *   xform data
 * @param stream
 *   ptr where handle of pmd's private stream data should be stored
 * @return
 *  - Returns 0 if private stream structure has been created successfully.
 *  - Returns -EINVAL if input parameters are invalid.
 *  - Returns -ENOTSUP if comp device does not support STATEFUL operations.
 *  - Returns -ENOTSUP if comp device does not support the comp transform.
 *  - Returns -ENOMEM if the private stream could not be allocated.
 */
int
qat_comp_stream_create(struct rte_compressdev *dev,
		       const struct rte_comp_xform *xform,
		       void **stream)
{
	struct qat_comp_dev_private *qat = dev->data->dev_private;
	struct qat_comp_stream *ptr;

	if (unlikely(stream == NULL)) {
		QAT_LOG(ERR, "QAT: stream parameter is NULL");
		return -EINVAL;
	}
	if (unlikely(xform->type == RTE_COMP_COMPRESS)) {
		QAT_LOG(ERR, "QAT: stateful compression not supported");
		return -ENOTSUP;
	}
	if (unlikely(qat->streampool == NULL)) {
		QAT_LOG(ERR, "QAT device has no stream mempool");
		return -ENOMEM;
	}
	if (rte_mempool_get(qat->streampool, stream)) {
		QAT_LOG(ERR, "Couldn't get object from qat stream mempool");
		return -ENOMEM;
	}

	ptr = (struct qat_comp_stream *) *stream;
	qat_comp_stream_reset(ptr);
	ptr->qat_xform.qat_comp_request_type = QAT_COMP_REQUEST_DECOMPRESS;
	ptr->qat_xform.checksum_type = xform->decompress.chksum;

	if (qat_comp_create_templates(&ptr->qat_xform, qat->interm_buff_mz,
				      xform, ptr, RTE_COMP_OP_STATEFUL,
				      qat->qat_dev->qat_dev_gen)) {
		QAT_LOG(ERR, "QAT: problem with creating descriptor template for stream");
		rte_mempool_put(qat->streampool, *stream);
		*stream = NULL;
		return -EINVAL;
	}

	return 0;
}

/**
 * Free driver private stream data.
 *
 * @param dev
 *   Compressdev device
 * @param stream
 *   handle of pmd's private stream data
 * @return
 *  - 0 if successful
 *  - <0 in error cases
 *  - Returns -EINVAL if input parameters are invalid.
 *  - Returns -ENOTSUP if comp device does not support STATEFUL operations.
 *  - Returns -EBUSY if can't free stream as there are inflight operations
 */
int
qat_comp_stream_free(struct rte_compressdev *dev, void *stream)
{
	if (stream) {
		struct qat_comp_dev_private *qat = dev->data->dev_private;
		qat_comp_stream_reset((struct qat_comp_stream *) stream);
		rte_mempool_put(qat->streampool, stream);
		return 0;
	}
	return -EINVAL;
}