/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 Intel Corporation
 */

#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_hexdump.h>
#include <rte_comp.h>
#include <bus_pci_driver.h>
#include <rte_byteorder.h>
#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_spinlock.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memzone.h>

#include "qat_logs.h"
#include "qat_comp.h"
#include "qat_comp_pmd.h"

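/* Rewrite a dynamic-compression request in place so that it uses the
 * static (fixed) Huffman path: the translator (xlat) slice is unlinked
 * and the compression slice writes straight to DRAM.
 */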
static void
qat_comp_fallback_to_fixed(struct icp_qat_fw_comp_req *comp_req)
{
	QAT_DP_LOG(DEBUG, "QAT PMD: fallback to fixed compression!");

	comp_req->comn_hdr.service_cmd_id =
			ICP_QAT_FW_COMP_CMD_STATIC;

	ICP_QAT_FW_COMN_NEXT_ID_SET(
			&comp_req->comp_cd_ctrl,
			ICP_QAT_FW_SLICE_DRAM_WR);

	ICP_QAT_FW_COMN_NEXT_ID_SET(
			&comp_req->u2.xlt_cd_ctrl,
			ICP_QAT_FW_SLICE_NULL);
	ICP_QAT_FW_COMN_CURR_ID_SET(
			&comp_req->u2.xlt_cd_ctrl,
			ICP_QAT_FW_SLICE_NULL);
}

void
qat_comp_free_split_op_memzones(struct qat_comp_op_cookie *cookie,
				unsigned int nb_children)
{
	unsigned int i;

	/* free all memzones allocated for child descriptors */
	for (i = 0; i < nb_children; i++)
		rte_memzone_free(cookie->dst_memzones[i]);

	/* and free the pointer table */
	rte_free(cookie->dst_memzones);
	cookie->dst_memzones = NULL;
}

static int
qat_comp_allocate_split_op_memzones(struct qat_comp_op_cookie *cookie,
				    unsigned int nb_descriptors_needed)
{
	struct qat_queue *txq = &(cookie->qp->tx_q);
	char dst_memz_name[RTE_MEMZONE_NAMESIZE];
	unsigned int i;

	/* allocate the array of memzone pointers */
	cookie->dst_memzones = rte_zmalloc_socket("qat PMD im buf mz pointers",
			(nb_descriptors_needed - 1) *
				sizeof(const struct rte_memzone *),
			RTE_CACHE_LINE_SIZE, cookie->socket_id);

	if (cookie->dst_memzones == NULL) {
		QAT_DP_LOG(ERR,
			"QAT PMD: failed to allocate im buf mz pointers");
		return -ENOMEM;
	}

	for (i = 0; i < nb_descriptors_needed - 1; i++) {
		snprintf(dst_memz_name,
				sizeof(dst_memz_name),
				"dst_%u_%u_%u_%u_%u",
				cookie->qp->qat_dev->qat_dev_id,
				txq->hw_bundle_number, txq->hw_queue_number,
				cookie->cookie_index, i);

		cookie->dst_memzones[i] = rte_memzone_reserve_aligned(
				dst_memz_name, RTE_PMD_QAT_COMP_IM_BUFFER_SIZE,
				cookie->socket_id, RTE_MEMZONE_IOVA_CONTIG,
				RTE_CACHE_LINE_SIZE);

		if (cookie->dst_memzones[i] == NULL) {
			QAT_DP_LOG(ERR,
				"QAT PMD: failed to allocate dst buffer memzone");

			/* let's free all memzones allocated up to now */
			qat_comp_free_split_op_memzones(cookie, i);

			return -ENOMEM;
		}
	}

	return 0;
}

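/* Build a single firmware descriptor for a stateless op or a stateful
 * (decompress-only) op. Returns 0 on success, a negative errno on failure,
 * or - for an oversized dynamic-compression op with no checksum requested -
 * the number of descriptors the op must be split into, so that the caller
 * can invoke qat_comp_build_multiple_requests() instead.
 */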
int
qat_comp_build_request(void *in_op, uint8_t *out_msg,
		       void *op_cookie,
		       enum qat_device_gen qat_dev_gen __rte_unused)
{
	struct rte_comp_op *op = in_op;
	struct qat_comp_op_cookie *cookie =
			(struct qat_comp_op_cookie *)op_cookie;
	struct qat_comp_stream *stream;
	struct qat_comp_xform *qat_xform;
	const uint8_t *tmpl;
	struct icp_qat_fw_comp_req *comp_req =
	    (struct icp_qat_fw_comp_req *)out_msg;

	if (op->op_type == RTE_COMP_OP_STATEFUL) {
		stream = op->stream;
		qat_xform = &stream->qat_xform;
		if (unlikely(qat_xform->qat_comp_request_type !=
			     QAT_COMP_REQUEST_DECOMPRESS)) {
			QAT_DP_LOG(ERR, "QAT PMD does not support stateful compression");
			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
			return -EINVAL;
		}
		if (unlikely(stream->op_in_progress)) {
			QAT_DP_LOG(ERR, "QAT PMD does not support running multiple stateful operations on the same stream at once");
			op->status = RTE_COMP_OP_STATUS_INVALID_STATE;
			return -EINVAL;
		}
		stream->op_in_progress = 1;
	} else {
		stream = NULL;
		qat_xform = op->private_xform;
	}
	tmpl = (uint8_t *)&qat_xform->qat_comp_req_tmpl;

	rte_mov128(out_msg, tmpl);
	comp_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;

	if (likely(qat_xform->qat_comp_request_type ==
			QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)) {

		if (unlikely(op->src.length > QAT_FALLBACK_THLD)) {
			/* the operation must be split into pieces */
			if (qat_xform->checksum_type !=
					RTE_COMP_CHECKSUM_NONE) {
				/* fallback to fixed compression in case any
				 * checksum calculation was requested
				 */
				qat_comp_fallback_to_fixed(comp_req);
			} else {
				/* calculate num. of descriptors for split op */
				unsigned int nb_descriptors_needed =
					op->src.length / QAT_FALLBACK_THLD + 1;
				/* allocate memzone for output data */
				if (qat_comp_allocate_split_op_memzones(
					       cookie, nb_descriptors_needed)) {
					/* out of memory, fallback to fixed */
					qat_comp_fallback_to_fixed(comp_req);
				} else {
					QAT_DP_LOG(DEBUG,
							"Input data is too big, op must be split into %u descriptors",
							nb_descriptors_needed);
					return (int) nb_descriptors_needed;
				}
			}
		}

		/* set BFINAL bit according to flush_flag */
		comp_req->comp_pars.req_par_flags =
			ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
				ICP_QAT_FW_COMP_SOP,
				ICP_QAT_FW_COMP_EOP,
				op->flush_flag == RTE_COMP_FLUSH_FINAL ?
					ICP_QAT_FW_COMP_BFINAL
					: ICP_QAT_FW_COMP_NOT_BFINAL,
				ICP_QAT_FW_COMP_CNV,
				ICP_QAT_FW_COMP_CNV_RECOVERY);

	} else if (op->op_type == RTE_COMP_OP_STATEFUL) {

		comp_req->comp_pars.req_par_flags =
			ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
				(stream->start_of_packet) ?
					ICP_QAT_FW_COMP_SOP
				      : ICP_QAT_FW_COMP_NOT_SOP,
				(op->flush_flag == RTE_COMP_FLUSH_FULL ||
				 op->flush_flag == RTE_COMP_FLUSH_FINAL) ?
					ICP_QAT_FW_COMP_EOP
				      : ICP_QAT_FW_COMP_NOT_EOP,
				ICP_QAT_FW_COMP_NOT_BFINAL,
				ICP_QAT_FW_COMP_CNV,
				ICP_QAT_FW_COMP_CNV_RECOVERY);
	}

	/* common for sgl and flat buffers */
	comp_req->comp_pars.comp_len = op->src.length;
	comp_req->comp_pars.out_buffer_sz = rte_pktmbuf_pkt_len(op->m_dst) -
			op->dst.offset;

	if (op->m_src->next != NULL || op->m_dst->next != NULL) {
		/* sgl */
		int ret = 0;

		ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
				QAT_COMN_PTR_TYPE_SGL);

		if (unlikely(op->m_src->nb_segs > cookie->src_nb_elems)) {
			/* we need to allocate more elements in SGL */
			void *tmp;

			tmp = rte_realloc_socket(cookie->qat_sgl_src_d,
					  sizeof(struct qat_sgl) +
					  sizeof(struct qat_flat_buf) *
					  op->m_src->nb_segs, 64,
					  cookie->socket_id);

			if (unlikely(tmp == NULL)) {
				QAT_DP_LOG(ERR, "QAT PMD can't allocate memory"
					   " for %d elements of SGL",
					   op->m_src->nb_segs);
				op->status = RTE_COMP_OP_STATUS_ERROR;
				/* clear op-in-progress flag */
				if (stream)
					stream->op_in_progress = 0;
				return -ENOMEM;
			}
			/* new SGL is valid now */
			cookie->qat_sgl_src_d = (struct qat_sgl *)tmp;
			cookie->src_nb_elems = op->m_src->nb_segs;
			cookie->qat_sgl_src_phys_addr =
				rte_malloc_virt2iova(cookie->qat_sgl_src_d);
		}

		ret = qat_sgl_fill_array(op->m_src,
				op->src.offset,
				cookie->qat_sgl_src_d,
				op->src.length,
				cookie->src_nb_elems);
		if (ret) {
			QAT_DP_LOG(ERR, "QAT PMD Cannot fill source sgl array");
			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
			/* clear op-in-progress flag */
			if (stream)
				stream->op_in_progress = 0;
			return ret;
		}

		if (unlikely(op->m_dst->nb_segs > cookie->dst_nb_elems)) {
			/* we need to allocate more elements in SGL */
			struct qat_sgl *tmp;

			tmp = rte_realloc_socket(cookie->qat_sgl_dst_d,
					  sizeof(struct qat_sgl) +
					  sizeof(struct qat_flat_buf) *
					  op->m_dst->nb_segs, 64,
					  cookie->socket_id);

			if (unlikely(tmp == NULL)) {
				QAT_DP_LOG(ERR, "QAT PMD can't allocate memory"
					   " for %d elements of SGL",
					   op->m_dst->nb_segs);
				op->status = RTE_COMP_OP_STATUS_ERROR;
				/* clear op-in-progress flag */
				if (stream)
					stream->op_in_progress = 0;
				return -ENOMEM;
			}
			/* new SGL is valid now */
			cookie->qat_sgl_dst_d = (struct qat_sgl *)tmp;
			cookie->dst_nb_elems = op->m_dst->nb_segs;
			cookie->qat_sgl_dst_phys_addr =
				rte_malloc_virt2iova(cookie->qat_sgl_dst_d);
		}

		ret = qat_sgl_fill_array(op->m_dst,
				op->dst.offset,
				cookie->qat_sgl_dst_d,
				comp_req->comp_pars.out_buffer_sz,
				cookie->dst_nb_elems);
		if (ret) {
			QAT_DP_LOG(ERR, "QAT PMD Cannot fill dest. sgl array");
			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
			/* clear op-in-progress flag */
			if (stream)
				stream->op_in_progress = 0;
			return ret;
		}

		comp_req->comn_mid.src_data_addr =
				cookie->qat_sgl_src_phys_addr;
		comp_req->comn_mid.dest_data_addr =
				cookie->qat_sgl_dst_phys_addr;
		comp_req->comn_mid.src_length = 0;
		comp_req->comn_mid.dst_length = 0;

	} else {
		/* flat aka linear buffer */
		ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
				QAT_COMN_PTR_TYPE_FLAT);
		comp_req->comn_mid.src_length = op->src.length;
		comp_req->comn_mid.dst_length =
				comp_req->comp_pars.out_buffer_sz;

		comp_req->comn_mid.src_data_addr =
		    rte_pktmbuf_iova_offset(op->m_src, op->src.offset);
		comp_req->comn_mid.dest_data_addr =
		    rte_pktmbuf_iova_offset(op->m_dst, op->dst.offset);
	}

	if (unlikely(rte_pktmbuf_pkt_len(op->m_dst) < QAT_MIN_OUT_BUF_SIZE)) {
		/* QAT doesn't support a destination buffer smaller than
		 * QAT_MIN_OUT_BUF_SIZE. Propagate the error by converting
		 * this request into a null one and checking the status
		 * in the response.
		 */
		QAT_DP_LOG(WARNING, "QAT destination buffer too small - resend with larger buffer");
		comp_req->comn_hdr.service_type = ICP_QAT_FW_COMN_REQ_NULL;
		comp_req->comn_hdr.service_cmd_id = ICP_QAT_FW_NULL_REQ_SERV_ID;
		cookie->error = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
	}

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	QAT_DP_LOG(DEBUG, "Direction: %s",
	    qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
			    "decompression" : "compression");
	QAT_DP_HEXDUMP_LOG(DEBUG, "qat compression message:", comp_req,
		    sizeof(struct icp_qat_fw_comp_req));
#endif
	return 0;
}

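/* Mask-based modulo: queue sizes are powers of two, so wrapping a ring
 * offset reduces to a bitwise AND with (size - 1).
 */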
static inline uint32_t
adf_modulo(uint32_t data, uint32_t modulo_mask)
{
	return data & modulo_mask;
}

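/* Walk the destination mbuf segment chain to find where the next chunk
 * of output data should land, updating *mbuf and *offset in place.
 */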
static inline void
qat_comp_mbuf_skip(struct rte_mbuf **mbuf, uint32_t *offset, uint32_t len)
{
	while (*offset + len >= rte_pktmbuf_data_len(*mbuf)) {
		len -= (rte_pktmbuf_data_len(*mbuf) - *offset);
		*mbuf = (*mbuf)->next;
		*offset = 0;
	}
	*offset = len;
}

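/* Split one oversized dynamic-compression op into a parent descriptor plus
 * nb_descr - 1 child descriptors. The parent compresses the first
 * QAT_FALLBACK_THLD bytes straight into the user's destination mbuf; each
 * child compresses up to QAT_FALLBACK_THLD further bytes into its own
 * intermediate memzone, to be copied back as responses arrive. Returns the
 * number of descriptors actually built, or a negative errno on failure.
 */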
int
qat_comp_build_multiple_requests(void *in_op, struct qat_qp *qp,
				 uint32_t parent_tail, int nb_descr)
{
	struct rte_comp_op op_backup;
	struct rte_mbuf dst_mbuf;
	struct rte_comp_op *op = in_op;
	struct qat_queue *txq = &(qp->tx_q);
	uint8_t *base_addr = (uint8_t *)txq->base_addr;
	uint8_t *out_msg = base_addr + parent_tail;
	uint32_t tail = parent_tail;
	struct icp_qat_fw_comp_req *comp_req =
			(struct icp_qat_fw_comp_req *)out_msg;
	struct qat_comp_op_cookie *parent_cookie =
			(struct qat_comp_op_cookie *)
			qp->op_cookies[parent_tail / txq->msg_size];
	struct qat_comp_op_cookie *child_cookie;
	uint16_t dst_data_size =
			RTE_MIN(RTE_PMD_QAT_COMP_IM_BUFFER_SIZE, 65535);
	uint32_t data_to_enqueue = op->src.length - QAT_FALLBACK_THLD;
	int num_descriptors_built = 1;
	int ret;

	QAT_DP_LOG(DEBUG, "op %p, parent_cookie %p", op, parent_cookie);

	/* copy original op to the local variable for restoring later */
	rte_memcpy(&op_backup, op, sizeof(op_backup));

	parent_cookie->nb_child_responses = 0;
	parent_cookie->nb_children = 0;
	parent_cookie->split_op = 1;
	parent_cookie->dst_data = op->m_dst;
	parent_cookie->dst_data_offset = op->dst.offset;

	op->src.length = QAT_FALLBACK_THLD;
	op->flush_flag = RTE_COMP_FLUSH_FULL;

	QAT_DP_LOG(DEBUG, "parent op src len %u dst len %u",
			op->src.length, op->m_dst->pkt_len);

	ret = qat_comp_build_request(in_op, out_msg, parent_cookie,
			qp->qat_dev_gen);
	if (ret != 0) {
		/* restore op and clear cookie */
		QAT_DP_LOG(WARNING, "Failed to build parent descriptor");
		op->src.length = op_backup.src.length;
		op->flush_flag = op_backup.flush_flag;
		parent_cookie->split_op = 0;
		return ret;
	}

	/* prepare local dst mbuf */
	rte_memcpy(&dst_mbuf, op->m_dst, sizeof(dst_mbuf));
	rte_pktmbuf_reset(&dst_mbuf);
	dst_mbuf.buf_len = dst_data_size;
	dst_mbuf.data_len = dst_data_size;
	dst_mbuf.pkt_len = dst_data_size;
	dst_mbuf.data_off = 0;

	/* update op for the child operations */
	op->m_dst = &dst_mbuf;
	op->dst.offset = 0;

	while (data_to_enqueue) {
		const struct rte_memzone *mz =
			parent_cookie->dst_memzones[num_descriptors_built - 1];
		uint32_t src_data_size = RTE_MIN(data_to_enqueue,
				QAT_FALLBACK_THLD);
		uint32_t cookie_index;

		/* update params for the next op */
		op->src.offset += QAT_FALLBACK_THLD;
		op->src.length = src_data_size;
		op->flush_flag = (src_data_size == data_to_enqueue) ?
			op_backup.flush_flag : RTE_COMP_FLUSH_FULL;

		/* update dst mbuf for the next op (use memzone for dst data) */
		dst_mbuf.buf_addr = mz->addr;
		dst_mbuf.buf_iova = mz->iova;

		/* move the tail and calculate next cookie index */
		tail = adf_modulo(tail + txq->msg_size, txq->modulo_mask);
		cookie_index = tail / txq->msg_size;
		child_cookie = (struct qat_comp_op_cookie *)
				qp->op_cookies[cookie_index];
		comp_req = (struct icp_qat_fw_comp_req *)(base_addr + tail);

		/* update child cookie */
		child_cookie->split_op = 1; /* must be set for child as well */
		child_cookie->parent_cookie = parent_cookie; /* same as above */
		child_cookie->nb_children = 0;
		child_cookie->dest_buffer = mz->addr;

		QAT_DP_LOG(DEBUG,
				"cookie_index %u, child_cookie %p, comp_req %p",
				cookie_index, child_cookie, comp_req);
		QAT_DP_LOG(DEBUG,
				"data_to_enqueue %u, num_descriptors_built %d",
				data_to_enqueue, num_descriptors_built);
		QAT_DP_LOG(DEBUG, "child op src len %u dst len %u",
				op->src.length, op->m_dst->pkt_len);

		/* build the request */
		ret = qat_comp_build_request(op, (uint8_t *)comp_req,
				child_cookie, qp->qat_dev_gen);
		if (ret < 0) {
			QAT_DP_LOG(WARNING, "Failed to build child descriptor");
			/* restore op and clear cookie */
			rte_memcpy(op, &op_backup, sizeof(op_backup));
			parent_cookie->split_op = 0;
			parent_cookie->nb_children = 0;
			return ret;
		}

		data_to_enqueue -= src_data_size;
		num_descriptors_built++;
	}

	/* restore backed up original op */
	rte_memcpy(op, &op_backup, sizeof(op_backup));

	if (nb_descr != num_descriptors_built)
		QAT_DP_LOG(ERR, "split op. expected %d, built %d",
				nb_descr, num_descriptors_built);

	parent_cookie->nb_children = num_descriptors_built - 1;
	return num_descriptors_built;
}

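/* Copy one child's output from its intermediate memzone into the user's
 * destination mbuf chain, starting at the cursor kept in the parent
 * cookie, and advance that cursor for the next child.
 */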
static inline void
qat_comp_response_data_copy(struct qat_comp_op_cookie *cookie,
		       struct rte_comp_op *rx_op)
{
	struct qat_comp_op_cookie *pc = cookie->parent_cookie;
	struct rte_mbuf *sgl_buf = pc->dst_data;
	void *op_dst_addr = rte_pktmbuf_mtod_offset(sgl_buf, uint8_t *,
						    pc->dst_data_offset);

	/* number of bytes left in the current segment */
	uint32_t left_in_current = rte_pktmbuf_data_len(sgl_buf) -
			pc->dst_data_offset;

	uint32_t prod, sent;

	if (rx_op->produced <= left_in_current) {
		rte_memcpy(op_dst_addr, cookie->dest_buffer,
				rx_op->produced);
		/* calculate dst mbuf and offset for the next child op */
		if (rx_op->produced == left_in_current) {
			pc->dst_data = sgl_buf->next;
			pc->dst_data_offset = 0;
		} else
			pc->dst_data_offset += rx_op->produced;
	} else {
		rte_memcpy(op_dst_addr, cookie->dest_buffer,
				left_in_current);
		sgl_buf = sgl_buf->next;
		prod = rx_op->produced - left_in_current;
		sent = left_in_current;
		while (prod > rte_pktmbuf_data_len(sgl_buf)) {
			op_dst_addr = rte_pktmbuf_mtod_offset(sgl_buf,
					uint8_t *, 0);

			rte_memcpy(op_dst_addr,
					((uint8_t *)cookie->dest_buffer) +
					sent,
					rte_pktmbuf_data_len(sgl_buf));

			prod -= rte_pktmbuf_data_len(sgl_buf);
			sent += rte_pktmbuf_data_len(sgl_buf);

			sgl_buf = sgl_buf->next;
		}

		op_dst_addr = rte_pktmbuf_mtod_offset(sgl_buf, uint8_t *, 0);

		rte_memcpy(op_dst_addr,
				((uint8_t *)cookie->dest_buffer) + sent,
				prod);

		/* calculate dst mbuf and offset for the next child op */
		if (prod == rte_pktmbuf_data_len(sgl_buf)) {
			pc->dst_data = sgl_buf->next;
			pc->dst_data_offset = 0;
		} else {
			pc->dst_data = sgl_buf;
			pc->dst_data_offset = prod;
		}
	}
}

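/* Translate one firmware response into rte_comp_op status, consumed and
 * produced counts. For split operations the parent and all children are
 * folded into a single op: child responses only update the parent cookie,
 * and the completed op is handed back - together with the number of
 * responses (parent + children) it accounted for - only when the last
 * response in the family arrives.
 */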
int
qat_comp_process_response(void **op, uint8_t *resp, void *op_cookie,
			  uint64_t *dequeue_err_count)
{
	struct icp_qat_fw_comp_resp *resp_msg =
			(struct icp_qat_fw_comp_resp *)resp;
	struct qat_comp_op_cookie *cookie =
			(struct qat_comp_op_cookie *)op_cookie;

	struct icp_qat_fw_resp_comp_pars *comp_resp1 =
	  (struct icp_qat_fw_resp_comp_pars *)&resp_msg->comp_resp_pars;

	QAT_DP_LOG(DEBUG, "input counter = %u, output counter = %u",
		   comp_resp1->input_byte_counter,
		   comp_resp1->output_byte_counter);

	struct rte_comp_op *rx_op = (struct rte_comp_op *)(uintptr_t)
			(resp_msg->opaque_data);
	struct qat_comp_stream *stream;
	struct qat_comp_xform *qat_xform;
	int err = resp_msg->comn_resp.comn_status &
			((1 << QAT_COMN_RESP_CMP_STATUS_BITPOS) |
			 (1 << QAT_COMN_RESP_XLAT_STATUS_BITPOS));

	if (rx_op->op_type == RTE_COMP_OP_STATEFUL) {
		stream = rx_op->stream;
		qat_xform = &stream->qat_xform;
		/* clear op-in-progress flag */
		stream->op_in_progress = 0;
	} else {
		stream = NULL;
		qat_xform = rx_op->private_xform;
	}

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	QAT_DP_LOG(DEBUG, "Direction: %s",
	    qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
	    "decompression" : "compression");
	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
			sizeof(struct icp_qat_fw_comp_resp));
#endif

	if (unlikely(cookie->error)) {
		rx_op->status = cookie->error;
		cookie->error = 0;
		++(*dequeue_err_count);
		rx_op->debug_status = 0;
		rx_op->consumed = 0;
		rx_op->produced = 0;
		*op = (void *)rx_op;
		/* also in this case the number of returned ops must be
		 * equal to one and the appropriate (error) status must
		 * be set as well
		 */
		return 1;
	}

	if (likely(qat_xform->qat_comp_request_type
			!= QAT_COMP_REQUEST_DECOMPRESS)) {
		if (unlikely(ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(
				resp_msg->comn_resp.hdr_flags)
					== ICP_QAT_FW_COMP_NO_CNV)) {
			rx_op->status = RTE_COMP_OP_STATUS_ERROR;
			rx_op->debug_status = ERR_CODE_QAT_COMP_WRONG_FW;
			*op = (void *)rx_op;
			QAT_DP_LOG(ERR,
					"This QAT hardware doesn't support compression operation");
			++(*dequeue_err_count);
			return 1;
		}
	}

	if (err) {
		if (unlikely((err & (1 << QAT_COMN_RESP_XLAT_STATUS_BITPOS))
			     && (qat_xform->qat_comp_request_type
				 == QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS))) {
			QAT_DP_LOG(ERR, "QAT intermediate buffer may be too "
			    "small for output, try configuring a larger size");
		}

		int8_t cmp_err_code =
			(int8_t)resp_msg->comn_resp.comn_error.cmp_err_code;
		int8_t xlat_err_code =
			(int8_t)resp_msg->comn_resp.comn_error.xlat_err_code;

		/* handle recoverable out-of-buffer condition in stateful
		 * decompression scenario
		 */
		if (cmp_err_code == ERR_CODE_OVERFLOW_ERROR && !xlat_err_code
				&& qat_xform->qat_comp_request_type
					== QAT_COMP_REQUEST_DECOMPRESS
				&& rx_op->op_type == RTE_COMP_OP_STATEFUL) {
			struct icp_qat_fw_resp_comp_pars *comp_resp =
					&resp_msg->comp_resp_pars;
			rx_op->status =
				RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE;
			rx_op->consumed = comp_resp->input_byte_counter;
			rx_op->produced = comp_resp->output_byte_counter;
			stream->start_of_packet = 0;
		} else if ((cmp_err_code == ERR_CODE_OVERFLOW_ERROR
			  && !xlat_err_code)
				||
		    (!cmp_err_code && xlat_err_code == ERR_CODE_OVERFLOW_ERROR)
				||
		    (cmp_err_code == ERR_CODE_OVERFLOW_ERROR &&
		     xlat_err_code == ERR_CODE_OVERFLOW_ERROR)) {

			struct icp_qat_fw_resp_comp_pars *comp_resp =
					(struct icp_qat_fw_resp_comp_pars *)
					&resp_msg->comp_resp_pars;

			/* handle recoverable out-of-buffer condition
			 * in stateless compression scenario
			 */
			if (comp_resp->input_byte_counter) {
				if ((qat_xform->qat_comp_request_type
				== QAT_COMP_REQUEST_FIXED_COMP_STATELESS) ||
				    (qat_xform->qat_comp_request_type
				== QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)) {

					rx_op->status =
				RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE;
					rx_op->consumed =
						comp_resp->input_byte_counter;
					rx_op->produced =
						comp_resp->output_byte_counter;
				} else
					rx_op->status =
				RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
			} else
				rx_op->status =
				RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
		} else
			rx_op->status = RTE_COMP_OP_STATUS_ERROR;

		++(*dequeue_err_count);
		rx_op->debug_status =
			*((uint16_t *)(&resp_msg->comn_resp.comn_error));
	} else {
		struct icp_qat_fw_resp_comp_pars *comp_resp =
		  (struct icp_qat_fw_resp_comp_pars *)&resp_msg->comp_resp_pars;

		rx_op->status = RTE_COMP_OP_STATUS_SUCCESS;
		rx_op->consumed = comp_resp->input_byte_counter;
		rx_op->produced = comp_resp->output_byte_counter;
		if (stream)
			stream->start_of_packet = 0;

		if (qat_xform->checksum_type != RTE_COMP_CHECKSUM_NONE) {
			if (qat_xform->checksum_type == RTE_COMP_CHECKSUM_CRC32)
				rx_op->output_chksum = comp_resp->curr_crc32;
			else if (qat_xform->checksum_type ==
					RTE_COMP_CHECKSUM_ADLER32)
				rx_op->output_chksum = comp_resp->curr_adler_32;
			else
				rx_op->output_chksum = comp_resp->curr_chksum;
		}
	}
	QAT_DP_LOG(DEBUG, "About to check for split op :cookies: %p %p, split:%u",
		cookie, cookie->parent_cookie, cookie->split_op);

	if (cookie->split_op) {
		*op = NULL;
		struct qat_comp_op_cookie *pc = cookie->parent_cookie;

		if (cookie->nb_children > 0) {
			QAT_DP_LOG(DEBUG, "Parent");
			/* parent - don't return until all children
			 * responses are collected
			 */
			cookie->total_consumed = rx_op->consumed;
			cookie->total_produced = rx_op->produced;
			if (err) {
				cookie->error = rx_op->status;
				rx_op->status = RTE_COMP_OP_STATUS_SUCCESS;
			} else {
				/* calculate dst mbuf and offset for child op */
				qat_comp_mbuf_skip(&cookie->dst_data,
						&cookie->dst_data_offset,
						rx_op->produced);
			}
		} else {
			QAT_DP_LOG(DEBUG, "Child");
			if (pc->error == RTE_COMP_OP_STATUS_SUCCESS) {
				if (err)
					pc->error = rx_op->status;
				if (rx_op->produced) {
					/* this covers both SUCCESS and
					 * OUT_OF_SPACE_RECOVERABLE cases
					 */
					qat_comp_response_data_copy(cookie,
							rx_op);
					pc->total_consumed += rx_op->consumed;
					pc->total_produced += rx_op->produced;
				}
			}
			rx_op->status = RTE_COMP_OP_STATUS_SUCCESS;

			pc->nb_child_responses++;

			/* (child) cookie fields have to be reset
			 * to avoid problems with reusability -
			 * rx and tx queue starting from index zero
			 */
			cookie->nb_children = 0;
			cookie->split_op = 0;
			cookie->nb_child_responses = 0;
			cookie->dest_buffer = NULL;

			if (pc->nb_child_responses == pc->nb_children) {
				uint8_t child_resp;

				/* parent should be included as well */
				child_resp = pc->nb_child_responses + 1;

				rx_op->status = pc->error;
				rx_op->consumed = pc->total_consumed;
				rx_op->produced = pc->total_produced;
				*op = (void *)rx_op;

				/* free memzones used for dst data */
				qat_comp_free_split_op_memzones(pc,
						pc->nb_children);

				/* (parent) cookie fields have to be reset
				 * to avoid problems with reusability -
				 * rx and tx queue starting from index zero
				 */
				pc->nb_children = 0;
				pc->split_op = 0;
				pc->nb_child_responses = 0;
				pc->error = RTE_COMP_OP_STATUS_SUCCESS;

				return child_resp;
			}
		}
		return 0;
	}

	*op = (void *)rx_op;
	return 1;
}

unsigned int
qat_comp_xform_size(void)
{
	return RTE_ALIGN_CEIL(sizeof(struct qat_comp_xform), 8);
}

unsigned int
qat_comp_stream_size(void)
{
	return RTE_ALIGN_CEIL(sizeof(struct qat_comp_stream), 8);
}

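/* Fill in the common request header of a descriptor template: the service
 * command id (static/dynamic compression or decompression), the service
 * type and the default flat-pointer descriptor flags.
 */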
static void
qat_comp_create_req_hdr(struct icp_qat_fw_comn_req_hdr *header,
	    enum qat_comp_request_type request)
{
	if (request == QAT_COMP_REQUEST_FIXED_COMP_STATELESS)
		header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
	else if (request == QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)
		header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC;
	else if (request == QAT_COMP_REQUEST_DECOMPRESS)
		header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;

	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
	header->hdr_flags =
	    ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);

	header->comn_req_flags = ICP_QAT_FW_COMN_FLAGS_BUILD(
	    QAT_COMN_CD_FLD_TYPE_16BYTE_DATA, QAT_COMN_PTR_TYPE_FLAT);
}

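/* Build the firmware request template stored in the xform and reused by
 * qat_comp_build_request() via rte_mov128(): request header, session flags,
 * generation-specific slice configuration word and the slice chain. For the
 * dynamic path the translator slice and the intermediate buffer pointer are
 * linked in; for stateful decompression the stream's state registers and
 * RAM banks are attached instead.
 */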
8164c6912d3SFan Zhang static int
qat_comp_create_templates(struct qat_comp_xform * qat_xform,const struct rte_memzone * interm_buff_mz,const struct rte_comp_xform * xform,const struct qat_comp_stream * stream,enum rte_comp_op_type op_type,enum qat_device_gen qat_dev_gen)8174c6912d3SFan Zhang qat_comp_create_templates(struct qat_comp_xform *qat_xform,
818a124830aSFiona Trahe 			  const struct rte_memzone *interm_buff_mz,
81982822753SAdam Dybkowski 			  const struct rte_comp_xform *xform,
82082822753SAdam Dybkowski 			  const struct qat_comp_stream *stream,
8214c6912d3SFan Zhang 			  enum rte_comp_op_type op_type,
8224c6912d3SFan Zhang 			  enum qat_device_gen qat_dev_gen)
8236a7ea148SFiona Trahe {
8246a7ea148SFiona Trahe 	struct icp_qat_fw_comp_req *comp_req;
8256a7ea148SFiona Trahe 	uint32_t req_par_flags;
8264c6912d3SFan Zhang 	int res;
8276a7ea148SFiona Trahe 
8286a7ea148SFiona Trahe 	if (unlikely(qat_xform == NULL)) {
8296a7ea148SFiona Trahe 		QAT_LOG(ERR, "Session was not created for this device");
8306a7ea148SFiona Trahe 		return -EINVAL;
8316a7ea148SFiona Trahe 	}
8326a7ea148SFiona Trahe 
83382822753SAdam Dybkowski 	if (op_type == RTE_COMP_OP_STATEFUL) {
83482822753SAdam Dybkowski 		if (unlikely(stream == NULL)) {
83582822753SAdam Dybkowski 			QAT_LOG(ERR, "Stream must be non null for stateful op");
83682822753SAdam Dybkowski 			return -EINVAL;
83782822753SAdam Dybkowski 		}
83882822753SAdam Dybkowski 		if (unlikely(qat_xform->qat_comp_request_type !=
83982822753SAdam Dybkowski 			     QAT_COMP_REQUEST_DECOMPRESS)) {
84082822753SAdam Dybkowski 			QAT_LOG(ERR, "QAT PMD does not support stateful compression");
84182822753SAdam Dybkowski 			return -ENOTSUP;
84282822753SAdam Dybkowski 		}
84382822753SAdam Dybkowski 	}
84482822753SAdam Dybkowski 
8454c6912d3SFan Zhang 	if (qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS)
8466a7ea148SFiona Trahe 		req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
8476a7ea148SFiona Trahe 				ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP,
848c13cecf6SAdam Dybkowski 				ICP_QAT_FW_COMP_BFINAL,
849c13cecf6SAdam Dybkowski 				ICP_QAT_FW_COMP_CNV,
850c13cecf6SAdam Dybkowski 				ICP_QAT_FW_COMP_CNV_RECOVERY);
8514c6912d3SFan Zhang 	else
8526a7ea148SFiona Trahe 		req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
8536a7ea148SFiona Trahe 				ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP,
8546a7ea148SFiona Trahe 				ICP_QAT_FW_COMP_BFINAL, ICP_QAT_FW_COMP_CNV,
8556a7ea148SFiona Trahe 				ICP_QAT_FW_COMP_CNV_RECOVERY);
8566a7ea148SFiona Trahe 
8576a7ea148SFiona Trahe 	comp_req = &qat_xform->qat_comp_req_tmpl;
8586a7ea148SFiona Trahe 
8596a7ea148SFiona Trahe 	/* Initialize header */
8606a7ea148SFiona Trahe 	qat_comp_create_req_hdr(&comp_req->comn_hdr,
8616a7ea148SFiona Trahe 					qat_xform->qat_comp_request_type);
8626a7ea148SFiona Trahe 
86382822753SAdam Dybkowski 	if (op_type == RTE_COMP_OP_STATEFUL) {
86482822753SAdam Dybkowski 		comp_req->comn_hdr.serv_specif_flags =
86582822753SAdam Dybkowski 				ICP_QAT_FW_COMP_FLAGS_BUILD(
86682822753SAdam Dybkowski 			ICP_QAT_FW_COMP_STATEFUL_SESSION,
86782822753SAdam Dybkowski 			ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
86882822753SAdam Dybkowski 			ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
86982822753SAdam Dybkowski 			ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
87082822753SAdam Dybkowski 			ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
87182822753SAdam Dybkowski 
87282822753SAdam Dybkowski 		/* Decompression state registers */
87382822753SAdam Dybkowski 		comp_req->comp_cd_ctrl.comp_state_addr =
87482822753SAdam Dybkowski 				stream->state_registers_decomp_phys;
87582822753SAdam Dybkowski 
8764c6912d3SFan Zhang 		/* RAM bank flags */
87782822753SAdam Dybkowski 		comp_req->comp_cd_ctrl.ram_bank_flags =
8784c6912d3SFan Zhang 				qat_comp_gen_dev_ops[qat_dev_gen]
8794c6912d3SFan Zhang 					.qat_comp_get_ram_bank_flags();
88082822753SAdam Dybkowski 
88182822753SAdam Dybkowski 		comp_req->comp_cd_ctrl.ram_banks_addr =
88282822753SAdam Dybkowski 				stream->inflate_context_phys;
88382822753SAdam Dybkowski 	} else {
88482822753SAdam Dybkowski 		comp_req->comn_hdr.serv_specif_flags =
88582822753SAdam Dybkowski 				ICP_QAT_FW_COMP_FLAGS_BUILD(
8866a7ea148SFiona Trahe 			ICP_QAT_FW_COMP_STATELESS_SESSION,
8876a7ea148SFiona Trahe 			ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
8886a7ea148SFiona Trahe 			ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
8896a7ea148SFiona Trahe 			ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
890e2e35849STomasz Jozwiak 			ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
89182822753SAdam Dybkowski 	}
8926a7ea148SFiona Trahe 
8934c6912d3SFan Zhang 	res = qat_comp_gen_dev_ops[qat_dev_gen].qat_comp_set_slice_cfg_word(
8944c6912d3SFan Zhang 			qat_xform, xform, op_type,
8954c6912d3SFan Zhang 			comp_req->cd_pars.sl.comp_slice_cfg_word);
8964c6912d3SFan Zhang 	if (res)
8974c6912d3SFan Zhang 		return res;
8986a7ea148SFiona Trahe 
8996a7ea148SFiona Trahe 	comp_req->comp_pars.initial_adler = 1;
9006a7ea148SFiona Trahe 	comp_req->comp_pars.initial_crc32 = 0;
9016a7ea148SFiona Trahe 	comp_req->comp_pars.req_par_flags = req_par_flags;
9026a7ea148SFiona Trahe 
9036a7ea148SFiona Trahe 
9046a7ea148SFiona Trahe 	if (qat_xform->qat_comp_request_type ==
9056a7ea148SFiona Trahe 			QAT_COMP_REQUEST_FIXED_COMP_STATELESS ||
9066a7ea148SFiona Trahe 	    qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) {
9076a7ea148SFiona Trahe 		ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl,
9086a7ea148SFiona Trahe 					    ICP_QAT_FW_SLICE_DRAM_WR);
9096a7ea148SFiona Trahe 		ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->comp_cd_ctrl,
9106a7ea148SFiona Trahe 					    ICP_QAT_FW_SLICE_COMP);
9116a7ea148SFiona Trahe 	} else if (qat_xform->qat_comp_request_type ==
9126a7ea148SFiona Trahe 			QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS) {
9136a7ea148SFiona Trahe 
914a124830aSFiona Trahe 		ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl,
915a124830aSFiona Trahe 				ICP_QAT_FW_SLICE_XLAT);
916a124830aSFiona Trahe 		ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->comp_cd_ctrl,
917a124830aSFiona Trahe 				ICP_QAT_FW_SLICE_COMP);
918a124830aSFiona Trahe 
919a124830aSFiona Trahe 		ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->u2.xlt_cd_ctrl,
920a124830aSFiona Trahe 				ICP_QAT_FW_SLICE_DRAM_WR);
921a124830aSFiona Trahe 		ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->u2.xlt_cd_ctrl,
922a124830aSFiona Trahe 				ICP_QAT_FW_SLICE_XLAT);
923a124830aSFiona Trahe 
924a124830aSFiona Trahe 		comp_req->u1.xlt_pars.inter_buff_ptr =
9254c6912d3SFan Zhang 				(qat_comp_get_num_im_bufs_required(qat_dev_gen)
9264c6912d3SFan Zhang 					== 0) ? 0 : interm_buff_mz->iova;
9276a7ea148SFiona Trahe 	}
9286a7ea148SFiona Trahe 
9296a7ea148SFiona Trahe #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
9306a7ea148SFiona Trahe 	QAT_DP_HEXDUMP_LOG(DEBUG, "qat compression message template:", comp_req,
9316a7ea148SFiona Trahe 		    sizeof(struct icp_qat_fw_comp_req));
9326a7ea148SFiona Trahe #endif
9336a7ea148SFiona Trahe 	return 0;
9346a7ea148SFiona Trahe }
9356a7ea148SFiona Trahe 
9366a7ea148SFiona Trahe /**
9376a7ea148SFiona Trahe  * Create driver private_xform data.
9386a7ea148SFiona Trahe  *
9396a7ea148SFiona Trahe  * @param dev
9406a7ea148SFiona Trahe  *   Compressdev device
9416a7ea148SFiona Trahe  * @param xform
9426a7ea148SFiona Trahe  *   xform data from application
9436a7ea148SFiona Trahe  * @param private_xform
9446a7ea148SFiona Trahe  *   ptr where handle of pmd's private_xform data should be stored
9456a7ea148SFiona Trahe  * @return
9466a7ea148SFiona Trahe  *  - if successful returns 0
9476a7ea148SFiona Trahe  *    and valid private_xform handle
9486a7ea148SFiona Trahe  *  - <0 in error cases
9496a7ea148SFiona Trahe  *  - Returns -EINVAL if input parameters are invalid.
9506a7ea148SFiona Trahe  *  - Returns -ENOTSUP if comp device does not support the comp transform.
9516a7ea148SFiona Trahe  *  - Returns -ENOMEM if the private_xform could not be allocated.
9526a7ea148SFiona Trahe  */
9536a7ea148SFiona Trahe int
qat_comp_private_xform_create(struct rte_compressdev * dev,const struct rte_comp_xform * xform,void ** private_xform)9546a7ea148SFiona Trahe qat_comp_private_xform_create(struct rte_compressdev *dev,
9556a7ea148SFiona Trahe 			      const struct rte_comp_xform *xform,
9566a7ea148SFiona Trahe 			      void **private_xform)
9576a7ea148SFiona Trahe {
9586a7ea148SFiona Trahe 	struct qat_comp_dev_private *qat = dev->data->dev_private;
9594c6912d3SFan Zhang 	enum qat_device_gen qat_dev_gen = qat->qat_dev->qat_dev_gen;
9604c6912d3SFan Zhang 	unsigned int im_bufs = qat_comp_get_num_im_bufs_required(qat_dev_gen);
9616a7ea148SFiona Trahe 
9626a7ea148SFiona Trahe 	if (unlikely(private_xform == NULL)) {
9636a7ea148SFiona Trahe 		QAT_LOG(ERR, "QAT: private_xform parameter is NULL");
9646a7ea148SFiona Trahe 		return -EINVAL;
9656a7ea148SFiona Trahe 	}
9666a7ea148SFiona Trahe 	if (unlikely(qat->xformpool == NULL)) {
9676a7ea148SFiona Trahe 		QAT_LOG(ERR, "QAT device has no private_xform mempool");
9686a7ea148SFiona Trahe 		return -ENOMEM;
9696a7ea148SFiona Trahe 	}
9706a7ea148SFiona Trahe 	if (rte_mempool_get(qat->xformpool, private_xform)) {
9716a7ea148SFiona Trahe 		QAT_LOG(ERR, "Couldn't get object from qat xform mempool");
9726a7ea148SFiona Trahe 		return -ENOMEM;
9736a7ea148SFiona Trahe 	}
9746a7ea148SFiona Trahe 
9756a7ea148SFiona Trahe 	struct qat_comp_xform *qat_xform =
9766a7ea148SFiona Trahe 			(struct qat_comp_xform *)*private_xform;
9776a7ea148SFiona Trahe 
9786a7ea148SFiona Trahe 	if (xform->type == RTE_COMP_COMPRESS) {
9796a7ea148SFiona Trahe 
9806a7ea148SFiona Trahe 		if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED ||
9816a7ea148SFiona Trahe 		  ((xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_DEFAULT)
9824c6912d3SFan Zhang 				   && qat->interm_buff_mz == NULL
9834c6912d3SFan Zhang 				   && im_bufs > 0))
9846a7ea148SFiona Trahe 			qat_xform->qat_comp_request_type =
9856a7ea148SFiona Trahe 					QAT_COMP_REQUEST_FIXED_COMP_STATELESS;
9866a7ea148SFiona Trahe 
987a124830aSFiona Trahe 		else if ((xform->compress.deflate.huffman ==
988a124830aSFiona Trahe 				RTE_COMP_HUFFMAN_DYNAMIC ||
989a124830aSFiona Trahe 				xform->compress.deflate.huffman ==
990a124830aSFiona Trahe 						RTE_COMP_HUFFMAN_DEFAULT) &&
9914c6912d3SFan Zhang 				(qat->interm_buff_mz != NULL ||
9924c6912d3SFan Zhang 						im_bufs == 0))
993a124830aSFiona Trahe 
994a124830aSFiona Trahe 			qat_xform->qat_comp_request_type =
995a124830aSFiona Trahe 					QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS;
996a124830aSFiona Trahe 
997a124830aSFiona Trahe 		else {
998a124830aSFiona Trahe 			QAT_LOG(ERR,
999a124830aSFiona Trahe 					"IM buffers needed for dynamic deflate. Set size in config file");
			/* put the unused xform back into the pool so it is not leaked */
			rte_mempool_put(qat->xformpool, *private_xform);
			*private_xform = NULL;
1000a124830aSFiona Trahe 			return -EINVAL;
1001a124830aSFiona Trahe 		}
1002a124830aSFiona Trahe 
10037586c578SFiona Trahe 		qat_xform->checksum_type = xform->compress.chksum;
10046a7ea148SFiona Trahe 
10056a7ea148SFiona Trahe 	} else {
10066a7ea148SFiona Trahe 		qat_xform->qat_comp_request_type = QAT_COMP_REQUEST_DECOMPRESS;
10077586c578SFiona Trahe 		qat_xform->checksum_type = xform->decompress.chksum;
10086a7ea148SFiona Trahe 	}
10096a7ea148SFiona Trahe 
101082822753SAdam Dybkowski 	if (qat_comp_create_templates(qat_xform, qat->interm_buff_mz, xform,
10114c6912d3SFan Zhang 				      NULL, RTE_COMP_OP_STATELESS,
10124c6912d3SFan Zhang 				      qat_dev_gen)) {
10136a7ea148SFiona Trahe 		QAT_LOG(ERR, "QAT: failed to create compression descriptor templates");
		/* put the unused xform back into the pool so it is not leaked */
		rte_mempool_put(qat->xformpool, *private_xform);
		*private_xform = NULL;
10146a7ea148SFiona Trahe 		return -EINVAL;
10156a7ea148SFiona Trahe 	}
10166a7ea148SFiona Trahe 	return 0;
10176a7ea148SFiona Trahe }
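
/*
 * Usage sketch (illustrative only, not part of the driver): applications
 * do not call qat_comp_private_xform_create() directly; it is reached via
 * the compressdev API, which dispatches to this PMD op. Assuming a
 * configured compress device `dev_id` and dynamic deflate:
 *
 *	struct rte_comp_xform xform = {
 *		.type = RTE_COMP_COMPRESS,
 *		.compress = {
 *			.algo = RTE_COMP_ALGO_DEFLATE,
 *			.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC,
 *			.level = RTE_COMP_LEVEL_PMD_DEFAULT,
 *			.chksum = RTE_COMP_CHECKSUM_NONE,
 *			.window_size = 15,	// 32 KB history, illustrative
 *		},
 *	};
 *	void *priv_xform = NULL;
 *
 *	if (rte_compressdev_private_xform_create(dev_id, &xform,
 *			&priv_xform) < 0)
 *		rte_exit(EXIT_FAILURE, "private_xform create failed");
 *	...
 *	rte_compressdev_private_xform_free(dev_id, priv_xform);
 */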
10186a7ea148SFiona Trahe 
10196a7ea148SFiona Trahe /**
10206a7ea148SFiona Trahe  * Free driver private_xform data.
10216a7ea148SFiona Trahe  *
10226a7ea148SFiona Trahe  * @param dev
10236a7ea148SFiona Trahe  *   Compressdev device
10246a7ea148SFiona Trahe  * @param private_xform
10256a7ea148SFiona Trahe  *   handle of pmd's private_xform data
10266a7ea148SFiona Trahe  * @return
10276a7ea148SFiona Trahe  *  - 0 if successful
10286a7ea148SFiona Trahe  *  - <0 in error cases
10296a7ea148SFiona Trahe  *  - Returns -EINVAL if input parameters are invalid.
10306a7ea148SFiona Trahe  */
10316a7ea148SFiona Trahe int
10326a7ea148SFiona Trahe qat_comp_private_xform_free(struct rte_compressdev *dev __rte_unused,
10336a7ea148SFiona Trahe 			    void *private_xform)
10346a7ea148SFiona Trahe {
10356a7ea148SFiona Trahe 	struct qat_comp_xform *qat_xform =
10366a7ea148SFiona Trahe 			(struct qat_comp_xform *)private_xform;
10376a7ea148SFiona Trahe 
10386a7ea148SFiona Trahe 	if (qat_xform) {
10396a7ea148SFiona Trahe 		memset(qat_xform, 0, qat_comp_xform_size());
10406a7ea148SFiona Trahe 		struct rte_mempool *mp = rte_mempool_from_obj(qat_xform);
10416a7ea148SFiona Trahe 
10426a7ea148SFiona Trahe 		rte_mempool_put(mp, qat_xform);
10436a7ea148SFiona Trahe 		return 0;
10446a7ea148SFiona Trahe 	}
10456a7ea148SFiona Trahe 	return -EINVAL;
10466a7ea148SFiona Trahe }
104782822753SAdam Dybkowski 
104882822753SAdam Dybkowski /**
104982822753SAdam Dybkowski  * Reset stream state for the next use.
105082822753SAdam Dybkowski  *
105182822753SAdam Dybkowski  * @param stream
105282822753SAdam Dybkowski  *   handle of pmd's private stream data
105382822753SAdam Dybkowski  */
105482822753SAdam Dybkowski static void
105582822753SAdam Dybkowski qat_comp_stream_reset(struct qat_comp_stream *stream)
105682822753SAdam Dybkowski {
105782822753SAdam Dybkowski 	if (stream) {
105882822753SAdam Dybkowski 		memset(&stream->qat_xform, 0, sizeof(struct qat_comp_xform));
105982822753SAdam Dybkowski 		stream->start_of_packet = 1;
106082822753SAdam Dybkowski 		stream->op_in_progress = 0;
106182822753SAdam Dybkowski 	}
106282822753SAdam Dybkowski }
106382822753SAdam Dybkowski 
106482822753SAdam Dybkowski /**
106582822753SAdam Dybkowski  * Create driver private stream data.
106682822753SAdam Dybkowski  *
106782822753SAdam Dybkowski  * @param dev
106882822753SAdam Dybkowski  *   Compressdev device
106982822753SAdam Dybkowski  * @param xform
107082822753SAdam Dybkowski  *   xform data
107182822753SAdam Dybkowski  * @param stream
107282822753SAdam Dybkowski  *   ptr where handle of pmd's private stream data should be stored
107382822753SAdam Dybkowski  * @return
107482822753SAdam Dybkowski  *  - Returns 0 if private stream structure has been created successfully.
107582822753SAdam Dybkowski  *  - Returns -EINVAL if input parameters are invalid.
107682822753SAdam Dybkowski  *  - Returns -ENOTSUP if stateful compression is requested
107782822753SAdam Dybkowski  *    (only stateful decompression is supported).
107882822753SAdam Dybkowski  *  - Returns -ENOMEM if the private stream could not be allocated.
107982822753SAdam Dybkowski  */
108082822753SAdam Dybkowski int
108182822753SAdam Dybkowski qat_comp_stream_create(struct rte_compressdev *dev,
108282822753SAdam Dybkowski 		       const struct rte_comp_xform *xform,
108382822753SAdam Dybkowski 		       void **stream)
108482822753SAdam Dybkowski {
108582822753SAdam Dybkowski 	struct qat_comp_dev_private *qat = dev->data->dev_private;
108682822753SAdam Dybkowski 	struct qat_comp_stream *ptr;
108782822753SAdam Dybkowski 
108882822753SAdam Dybkowski 	if (unlikely(stream == NULL)) {
108982822753SAdam Dybkowski 		QAT_LOG(ERR, "QAT: stream parameter is NULL");
109082822753SAdam Dybkowski 		return -EINVAL;
109182822753SAdam Dybkowski 	}
109282822753SAdam Dybkowski 	if (unlikely(xform->type == RTE_COMP_COMPRESS)) {
109382822753SAdam Dybkowski 		QAT_LOG(ERR, "QAT: stateful compression not supported");
109482822753SAdam Dybkowski 		return -ENOTSUP;
109582822753SAdam Dybkowski 	}
109682822753SAdam Dybkowski 	if (unlikely(qat->streampool == NULL)) {
109782822753SAdam Dybkowski 		QAT_LOG(ERR, "QAT device has no stream mempool");
109882822753SAdam Dybkowski 		return -ENOMEM;
109982822753SAdam Dybkowski 	}
110082822753SAdam Dybkowski 	if (rte_mempool_get(qat->streampool, stream)) {
110182822753SAdam Dybkowski 		QAT_LOG(ERR, "Couldn't get object from qat stream mempool");
110282822753SAdam Dybkowski 		return -ENOMEM;
110382822753SAdam Dybkowski 	}
110482822753SAdam Dybkowski 
110582822753SAdam Dybkowski 	ptr = (struct qat_comp_stream *) *stream;
110682822753SAdam Dybkowski 	qat_comp_stream_reset(ptr);
110782822753SAdam Dybkowski 	ptr->qat_xform.qat_comp_request_type = QAT_COMP_REQUEST_DECOMPRESS;
110882822753SAdam Dybkowski 	ptr->qat_xform.checksum_type = xform->decompress.chksum;
110982822753SAdam Dybkowski 
111082822753SAdam Dybkowski 	if (qat_comp_create_templates(&ptr->qat_xform, qat->interm_buff_mz,
11114c6912d3SFan Zhang 				      xform, ptr, RTE_COMP_OP_STATEFUL,
11124c6912d3SFan Zhang 				      qat->qat_dev->qat_dev_gen)) {
111382822753SAdam Dybkowski 		QAT_LOG(ERR, "QAT: problem with creating descriptor template for stream");
111482822753SAdam Dybkowski 		rte_mempool_put(qat->streampool, *stream);
111582822753SAdam Dybkowski 		*stream = NULL;
111682822753SAdam Dybkowski 		return -EINVAL;
111782822753SAdam Dybkowski 	}
111882822753SAdam Dybkowski 
111982822753SAdam Dybkowski 	return 0;
112082822753SAdam Dybkowski }
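
/*
 * Usage sketch (illustrative only): stateful decompression goes through
 * the compressdev API, which lands here. Assuming a configured device
 * `dev_id`:
 *
 *	struct rte_comp_xform xform = {
 *		.type = RTE_COMP_DECOMPRESS,
 *		.decompress = {
 *			.algo = RTE_COMP_ALGO_DEFLATE,
 *			.chksum = RTE_COMP_CHECKSUM_NONE,
 *		},
 *	};
 *	void *stream = NULL;
 *
 *	if (rte_compressdev_stream_create(dev_id, &xform, &stream) < 0)
 *		rte_exit(EXIT_FAILURE, "stream create failed");
 *	...
 *	rte_compressdev_stream_free(dev_id, stream);
 */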
112182822753SAdam Dybkowski 
112282822753SAdam Dybkowski /**
112382822753SAdam Dybkowski  * Free driver private stream data.
112482822753SAdam Dybkowski  *
112582822753SAdam Dybkowski  * @param dev
112682822753SAdam Dybkowski  *   Compressdev device
112782822753SAdam Dybkowski  * @param stream
112882822753SAdam Dybkowski  *   handle of pmd's private stream data
112982822753SAdam Dybkowski  * @return
113082822753SAdam Dybkowski  *  - 0 if successful
113182822753SAdam Dybkowski  *  - <0 in error cases
113282822753SAdam Dybkowski  *  - Returns -EINVAL if input parameters are invalid.
113382822753SAdam Dybkowski  *  - Returns -EINVAL if the stream handle is NULL (this implementation
113482822753SAdam Dybkowski  *    returns no other error codes).
113682822753SAdam Dybkowski int
113782822753SAdam Dybkowski qat_comp_stream_free(struct rte_compressdev *dev, void *stream)
113882822753SAdam Dybkowski {
113982822753SAdam Dybkowski 	if (stream) {
114082822753SAdam Dybkowski 		struct qat_comp_dev_private *qat = dev->data->dev_private;
114182822753SAdam Dybkowski 		qat_comp_stream_reset((struct qat_comp_stream *) stream);
114282822753SAdam Dybkowski 		rte_mempool_put(qat->streampool, stream);
114382822753SAdam Dybkowski 		return 0;
114482822753SAdam Dybkowski 	}
114582822753SAdam Dybkowski 	return -EINVAL;
114682822753SAdam Dybkowski }
1147*7cb939f6SVikash Poddar 
1148*7cb939f6SVikash Poddar /**
1149*7cb939f6SVikash Poddar  * Enqueue packets for processing on queue pair of a device
1150*7cb939f6SVikash Poddar  *
1151*7cb939f6SVikash Poddar  * @param qp
1152*7cb939f6SVikash Poddar  *   qat queue pair
1153*7cb939f6SVikash Poddar  * @param ops
1154*7cb939f6SVikash Poddar  *   Compressdev operation
1155*7cb939f6SVikash Poddar  * @param nb_ops
1156*7cb939f6SVikash Poddar  *   number of operations
1157*7cb939f6SVikash Poddar  * @return
1158*7cb939f6SVikash Poddar  *  - number of ops successfully enqueued; may be fewer than nb_ops
 *    (0 if the ring is full or the burst is deferred below the
 *    configured min enqueue burst threshold)
1159*7cb939f6SVikash Poddar  */
1160*7cb939f6SVikash Poddar uint16_t
1161*7cb939f6SVikash Poddar qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops)
1162*7cb939f6SVikash Poddar {
1163*7cb939f6SVikash Poddar 	register struct qat_queue *queue;
1164*7cb939f6SVikash Poddar 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
1165*7cb939f6SVikash Poddar 	register uint32_t nb_ops_sent = 0;
1166*7cb939f6SVikash Poddar 	register int nb_desc_to_build;
1167*7cb939f6SVikash Poddar 	uint16_t nb_ops_possible = nb_ops;
1168*7cb939f6SVikash Poddar 	register uint8_t *base_addr;
1169*7cb939f6SVikash Poddar 	register uint32_t tail;
1170*7cb939f6SVikash Poddar 
1171*7cb939f6SVikash Poddar 	int descriptors_built, total_descriptors_built = 0;
1172*7cb939f6SVikash Poddar 	int nb_remaining_descriptors;
1173*7cb939f6SVikash Poddar 	int overflow = 0;
1174*7cb939f6SVikash Poddar 
1175*7cb939f6SVikash Poddar 	if (unlikely(nb_ops == 0))
1176*7cb939f6SVikash Poddar 		return 0;
1177*7cb939f6SVikash Poddar 
1178*7cb939f6SVikash Poddar 	/* read params used a lot in main loop into registers */
1179*7cb939f6SVikash Poddar 	queue = &(tmp_qp->tx_q);
1180*7cb939f6SVikash Poddar 	base_addr = (uint8_t *)queue->base_addr;
1181*7cb939f6SVikash Poddar 	tail = queue->tail;
1182*7cb939f6SVikash Poddar 
1183*7cb939f6SVikash Poddar 	/* Find how many can actually fit on the ring */
1184*7cb939f6SVikash Poddar 	{
1185*7cb939f6SVikash Poddar 		/* dequeued can only be written by one thread, but it may not
1186*7cb939f6SVikash Poddar 		 * be this thread. As it is 4-byte aligned it will be read
1187*7cb939f6SVikash Poddar 		 * atomically here by any Intel CPU.
1188*7cb939f6SVikash Poddar 		 * enqueued can wrap before dequeued, but cannot
1189*7cb939f6SVikash Poddar 		 * lap it, as enq/deq (uint32_t) are wider than
1190*7cb939f6SVikash Poddar 		 * max_inflights (uint16_t). In practice inflights never
1191*7cb939f6SVikash Poddar 		 * comes close to UINT16_MAX, as it is <= ADF_MAX_DESC.
1192*7cb939f6SVikash Poddar 		 * On wrapping, the subtraction below still yields the correct
1193*7cb939f6SVikash Poddar 		 * positive value, as all three variables are unsigned.
1194*7cb939f6SVikash Poddar 		 */
1195*7cb939f6SVikash Poddar 		uint32_t inflights =
1196*7cb939f6SVikash Poddar 			tmp_qp->enqueued - tmp_qp->dequeued;
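
		/* Worked example of the wrap-around case (values are
		 * illustrative): enqueued = 0x00000002 and
		 * dequeued = 0xFFFFFFFC give inflights =
		 * 0x00000002 - 0xFFFFFFFC = 6 in uint32_t arithmetic,
		 * the true number of requests still on the ring.
		 */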
1197*7cb939f6SVikash Poddar 
1198*7cb939f6SVikash Poddar 		/* How far would this burst overshoot max_inflights? */
1199*7cb939f6SVikash Poddar 		overflow = (inflights + nb_ops) - tmp_qp->max_inflights;
1200*7cb939f6SVikash Poddar 		if (overflow > 0) {
1201*7cb939f6SVikash Poddar 			nb_ops_possible = nb_ops - overflow;
1202*7cb939f6SVikash Poddar 			if (nb_ops_possible == 0)
1203*7cb939f6SVikash Poddar 				return 0;
1204*7cb939f6SVikash Poddar 		}
1205*7cb939f6SVikash Poddar 
1206*7cb939f6SVikash Poddar 		/* QAT has plenty of work queued already, so don't waste cycles
1207*7cb939f6SVikash Poddar 		 * enqueueing; wait until the application has gathered a bigger
1208*7cb939f6SVikash Poddar 		 * burst or some completed ops have been dequeued
1209*7cb939f6SVikash Poddar 		 */
1210*7cb939f6SVikash Poddar 		if (tmp_qp->min_enq_burst_threshold && inflights >
1211*7cb939f6SVikash Poddar 				QAT_QP_MIN_INFL_THRESHOLD && nb_ops_possible <
1212*7cb939f6SVikash Poddar 				tmp_qp->min_enq_burst_threshold) {
1213*7cb939f6SVikash Poddar 			tmp_qp->stats.threshold_hit_count++;
1214*7cb939f6SVikash Poddar 			return 0;
1215*7cb939f6SVikash Poddar 		}
1216*7cb939f6SVikash Poddar 	}
1217*7cb939f6SVikash Poddar 
1218*7cb939f6SVikash Poddar 	/* At this point nb_ops_possible is assuming a 1:1 mapping
1219*7cb939f6SVikash Poddar 	 * between ops and descriptors.
1220*7cb939f6SVikash Poddar 	 * Fewer may be sent if some ops have to be split.
1221*7cb939f6SVikash Poddar 	 * nb_ops_possible is <= burst size.
1222*7cb939f6SVikash Poddar 	 * Find out how many spaces are actually available on the qp in case
1223*7cb939f6SVikash Poddar 	 * more are needed.
1224*7cb939f6SVikash Poddar 	 */
1225*7cb939f6SVikash Poddar 	nb_remaining_descriptors = nb_ops_possible
1226*7cb939f6SVikash Poddar 			 + ((overflow >= 0) ? 0 : -overflow);
1227*7cb939f6SVikash Poddar 	QAT_DP_LOG(DEBUG, "Nb ops requested %d, nb descriptors remaining %d",
1228*7cb939f6SVikash Poddar 			nb_ops, nb_remaining_descriptors);
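
	/* Worked example (illustrative values): with max_inflights = 128,
	 * inflights = 120 and nb_ops = 16, overflow = (120 + 16) - 128 = 8,
	 * so nb_ops_possible = 8 and nb_remaining_descriptors = 8.
	 * With inflights = 100 instead, overflow = -12, nb_ops_possible
	 * stays 16 and nb_remaining_descriptors = 16 + 12 = 28, leaving
	 * headroom for ops that split into several descriptors.
	 */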
1229*7cb939f6SVikash Poddar 
1230*7cb939f6SVikash Poddar 	while (nb_ops_sent != nb_ops_possible &&
1231*7cb939f6SVikash Poddar 				nb_remaining_descriptors > 0) {
1232*7cb939f6SVikash Poddar 		struct qat_comp_op_cookie *cookie =
1233*7cb939f6SVikash Poddar 				tmp_qp->op_cookies[tail >> queue->trailz];
1234*7cb939f6SVikash Poddar 
1235*7cb939f6SVikash Poddar 		descriptors_built = 0;
1236*7cb939f6SVikash Poddar 
1237*7cb939f6SVikash Poddar 		QAT_DP_LOG(DEBUG, "--- data length: %u",
1238*7cb939f6SVikash Poddar 			   ((struct rte_comp_op *)*ops)->src.length);
1239*7cb939f6SVikash Poddar 
1240*7cb939f6SVikash Poddar 		nb_desc_to_build = qat_comp_build_request(*ops,
1241*7cb939f6SVikash Poddar 				base_addr + tail, cookie, tmp_qp->qat_dev_gen);
1242*7cb939f6SVikash Poddar 		QAT_DP_LOG(DEBUG, "%d descriptors built, %d remaining, "
1243*7cb939f6SVikash Poddar 			"%d ops sent, %d descriptors needed",
1244*7cb939f6SVikash Poddar 			total_descriptors_built, nb_remaining_descriptors,
1245*7cb939f6SVikash Poddar 			nb_ops_sent, nb_desc_to_build);
1246*7cb939f6SVikash Poddar 
1247*7cb939f6SVikash Poddar 		if (unlikely(nb_desc_to_build < 0)) {
1248*7cb939f6SVikash Poddar 			/* this message cannot be enqueued */
1249*7cb939f6SVikash Poddar 			tmp_qp->stats.enqueue_err_count++;
1250*7cb939f6SVikash Poddar 			if (nb_ops_sent == 0)
1251*7cb939f6SVikash Poddar 				return 0;
1252*7cb939f6SVikash Poddar 			goto kick_tail;
1253*7cb939f6SVikash Poddar 		} else if (unlikely(nb_desc_to_build > 1)) {
1254*7cb939f6SVikash Poddar 			/* this op is too big and must be split - get more
1255*7cb939f6SVikash Poddar 			 * descriptors and retry
1256*7cb939f6SVikash Poddar 			 */
1257*7cb939f6SVikash Poddar 
1258*7cb939f6SVikash Poddar 			QAT_DP_LOG(DEBUG, "Build %d descriptors for this op",
1259*7cb939f6SVikash Poddar 					nb_desc_to_build);
1260*7cb939f6SVikash Poddar 
1261*7cb939f6SVikash Poddar 			nb_remaining_descriptors -= nb_desc_to_build;
1262*7cb939f6SVikash Poddar 			if (nb_remaining_descriptors >= 0) {
1263*7cb939f6SVikash Poddar 				/* There are enough remaining descriptors
1264*7cb939f6SVikash Poddar 				 * so retry
1265*7cb939f6SVikash Poddar 				 */
1266*7cb939f6SVikash Poddar 				int ret2 = qat_comp_build_multiple_requests(
1267*7cb939f6SVikash Poddar 						*ops, tmp_qp, tail,
1268*7cb939f6SVikash Poddar 						nb_desc_to_build);
1269*7cb939f6SVikash Poddar 
1270*7cb939f6SVikash Poddar 				if (unlikely(ret2 < 1)) {
1271*7cb939f6SVikash Poddar 					QAT_DP_LOG(DEBUG,
1272*7cb939f6SVikash Poddar 							"Failed to build (%d) descriptors, status %d",
1273*7cb939f6SVikash Poddar 							nb_desc_to_build, ret2);
1274*7cb939f6SVikash Poddar 
1275*7cb939f6SVikash Poddar 					qat_comp_free_split_op_memzones(cookie,
1276*7cb939f6SVikash Poddar 							nb_desc_to_build - 1);
1277*7cb939f6SVikash Poddar 
1278*7cb939f6SVikash Poddar 					tmp_qp->stats.enqueue_err_count++;
1279*7cb939f6SVikash Poddar 
1280*7cb939f6SVikash Poddar 					/* This message cannot be enqueued */
1281*7cb939f6SVikash Poddar 					if (nb_ops_sent == 0)
1282*7cb939f6SVikash Poddar 						return 0;
1283*7cb939f6SVikash Poddar 					goto kick_tail;
1284*7cb939f6SVikash Poddar 				} else {
1285*7cb939f6SVikash Poddar 					descriptors_built = ret2;
1286*7cb939f6SVikash Poddar 					total_descriptors_built +=
1287*7cb939f6SVikash Poddar 							descriptors_built;
1288*7cb939f6SVikash Poddar 					nb_remaining_descriptors -=
1289*7cb939f6SVikash Poddar 							descriptors_built;
1290*7cb939f6SVikash Poddar 					QAT_DP_LOG(DEBUG,
1291*7cb939f6SVikash Poddar 							"Multiple descriptors (%d) built ok",
1292*7cb939f6SVikash Poddar 							descriptors_built);
1293*7cb939f6SVikash Poddar 				}
1294*7cb939f6SVikash Poddar 			} else {
1295*7cb939f6SVikash Poddar 				QAT_DP_LOG(ERR, "For the current op, number of requested descriptors (%d) "
1296*7cb939f6SVikash Poddar 						"exceeds number of available descriptors (%d)",
1297*7cb939f6SVikash Poddar 						nb_desc_to_build,
1298*7cb939f6SVikash Poddar 						nb_remaining_descriptors +
1299*7cb939f6SVikash Poddar 							nb_desc_to_build);
1300*7cb939f6SVikash Poddar 
1301*7cb939f6SVikash Poddar 				qat_comp_free_split_op_memzones(cookie,
1302*7cb939f6SVikash Poddar 						nb_desc_to_build - 1);
1303*7cb939f6SVikash Poddar 
1304*7cb939f6SVikash Poddar 				/* Not enough extra descriptors */
1305*7cb939f6SVikash Poddar 				if (nb_ops_sent == 0)
1306*7cb939f6SVikash Poddar 					return 0;
1307*7cb939f6SVikash Poddar 				goto kick_tail;
1308*7cb939f6SVikash Poddar 			}
1309*7cb939f6SVikash Poddar 		} else {
1310*7cb939f6SVikash Poddar 			descriptors_built = 1;
1311*7cb939f6SVikash Poddar 			total_descriptors_built++;
1312*7cb939f6SVikash Poddar 			nb_remaining_descriptors--;
1313*7cb939f6SVikash Poddar 			QAT_DP_LOG(DEBUG, "Single descriptor built ok");
1314*7cb939f6SVikash Poddar 		}
1315*7cb939f6SVikash Poddar 
1316*7cb939f6SVikash Poddar 		tail = adf_modulo(tail + (queue->msg_size * descriptors_built),
1317*7cb939f6SVikash Poddar 				  queue->modulo_mask);
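
		/* The tail advance above wraps via a power-of-two mask.
		 * Illustrative values: msg_size = 128 and a 16 KB ring
		 * (modulo_mask = 0x3FFF) advance a tail of 0x3F80 by one
		 * descriptor to (0x3F80 + 0x80) & 0x3FFF = 0x0000, i.e.
		 * back to the start of the ring.
		 */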
1318*7cb939f6SVikash Poddar 		ops++;
1319*7cb939f6SVikash Poddar 		nb_ops_sent++;
1320*7cb939f6SVikash Poddar 	}
1321*7cb939f6SVikash Poddar 
1322*7cb939f6SVikash Poddar kick_tail:
1323*7cb939f6SVikash Poddar 	queue->tail = tail;
1324*7cb939f6SVikash Poddar 	tmp_qp->enqueued += total_descriptors_built;
1325*7cb939f6SVikash Poddar 	tmp_qp->stats.enqueued_count += nb_ops_sent;
1326*7cb939f6SVikash Poddar 	txq_write_tail(tmp_qp->qat_dev_gen, tmp_qp, queue);
1327*7cb939f6SVikash Poddar 	return nb_ops_sent;
1328*7cb939f6SVikash Poddar }
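
/*
 * Usage sketch (illustrative only): this burst function is reached via the
 * compressdev enqueue path. A typical application loop, assuming device
 * `dev_id`, queue pair 0 and an ops[] array of rte_comp_op already set up
 * with source/destination mbufs and a private_xform:
 *
 *	uint16_t sent = rte_compressdev_enqueue_burst(dev_id, 0, ops, n);
 *	uint16_t done = 0;
 *
 *	while (done < sent)
 *		done += rte_compressdev_dequeue_burst(dev_id, 0,
 *				deq_ops + done, sent - done);
 *
 * Any ops not accepted (n - sent) should be resubmitted once completions
 * have been dequeued, as the ring may simply have been full.
 */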
1329