xref: /dpdk/drivers/compress/qat/qat_comp.c (revision 68a03efeed657e6e05f281479b33b51102797e15)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2019 Intel Corporation
3  */
4 
5 #include <rte_mempool.h>
6 #include <rte_mbuf.h>
7 #include <rte_hexdump.h>
8 #include <rte_comp.h>
9 #include <rte_bus_pci.h>
10 #include <rte_byteorder.h>
11 #include <rte_memcpy.h>
12 #include <rte_common.h>
13 #include <rte_spinlock.h>
14 #include <rte_log.h>
15 #include <rte_malloc.h>
16 #include <rte_memzone.h>
17 
18 #include "qat_logs.h"
19 #include "qat_comp.h"
20 #include "qat_comp_pmd.h"
21 
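/*
 * Turn an already-built dynamic compression request into a static (fixed
 * Huffman) one: switch the command id to STATIC and detach the translator
 * (xlt) slice so the compression slice writes straight to DRAM.
 */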
22 static void
23 qat_comp_fallback_to_fixed(struct icp_qat_fw_comp_req *comp_req)
24 {
25 	QAT_DP_LOG(DEBUG, "QAT PMD: fallback to fixed compression!");
26 
27 	comp_req->comn_hdr.service_cmd_id =
28 			ICP_QAT_FW_COMP_CMD_STATIC;
29 
30 	ICP_QAT_FW_COMN_NEXT_ID_SET(
31 			&comp_req->comp_cd_ctrl,
32 			ICP_QAT_FW_SLICE_DRAM_WR);
33 
34 	ICP_QAT_FW_COMN_NEXT_ID_SET(
35 			&comp_req->u2.xlt_cd_ctrl,
36 			ICP_QAT_FW_SLICE_NULL);
37 	ICP_QAT_FW_COMN_CURR_ID_SET(
38 			&comp_req->u2.xlt_cd_ctrl,
39 			ICP_QAT_FW_SLICE_NULL);
40 }
41 
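/*
 * Release the intermediate destination memzones reserved for the child
 * descriptors of a split operation, then free the pointer table itself.
 */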
42 void
43 qat_comp_free_split_op_memzones(struct qat_comp_op_cookie *cookie,
44 				unsigned int nb_children)
45 {
46 	unsigned int i;
47 
48 	/* free all memzones allocated for child descriptors */
49 	for (i = 0; i < nb_children; i++)
50 		rte_memzone_free(cookie->dst_memzones[i]);
51 
52 	/* and free the pointer table */
53 	rte_free(cookie->dst_memzones);
54 	cookie->dst_memzones = NULL;
55 }
56 
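/*
 * Reserve one IOVA-contiguous destination memzone per child descriptor
 * (i.e. nb_descriptors_needed - 1 memzones) of a split operation.
 * Returns 0 on success or -ENOMEM; on failure any memzones reserved so
 * far are released again.
 */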
57 static int
58 qat_comp_allocate_split_op_memzones(struct qat_comp_op_cookie *cookie,
59 				    unsigned int nb_descriptors_needed)
60 {
61 	struct qat_queue *txq = &(cookie->qp->tx_q);
62 	char dst_memz_name[RTE_MEMZONE_NAMESIZE];
63 	unsigned int i;
64 
65 	/* allocate the array of memzone pointers */
66 	cookie->dst_memzones = rte_zmalloc_socket("qat PMD im buf mz pointers",
67 			(nb_descriptors_needed - 1) *
68 				sizeof(const struct rte_memzone *),
69 			RTE_CACHE_LINE_SIZE, cookie->socket_id);
70 
71 	if (cookie->dst_memzones == NULL) {
72 		QAT_DP_LOG(ERR,
73 			"QAT PMD: failed to allocate im buf mz pointers");
74 		return -ENOMEM;
75 	}
76 
77 	for (i = 0; i < nb_descriptors_needed - 1; i++) {
78 		snprintf(dst_memz_name,
79 				sizeof(dst_memz_name),
80 				"dst_%u_%u_%u_%u_%u",
81 				cookie->qp->qat_dev->qat_dev_id,
82 				txq->hw_bundle_number, txq->hw_queue_number,
83 				cookie->cookie_index, i);
84 
85 		cookie->dst_memzones[i] = rte_memzone_reserve_aligned(
86 				dst_memz_name, RTE_PMD_QAT_COMP_IM_BUFFER_SIZE,
87 				cookie->socket_id, RTE_MEMZONE_IOVA_CONTIG,
88 				RTE_CACHE_LINE_SIZE);
89 
90 		if (cookie->dst_memzones[i] == NULL) {
91 			QAT_DP_LOG(ERR,
92 				"QAT PMD: failed to allocate dst buffer memzone");
93 
94 			/* let's free all memzones allocated up to now */
95 			qat_comp_free_split_op_memzones(cookie, i);
96 
97 			return -ENOMEM;
98 		}
99 	}
100 
101 	return 0;
102 }
103 
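/*
 * Build a single firmware descriptor for a compression or decompression op.
 * Returns 0 on success, a negative errno on error, or a positive number of
 * descriptors when a stateless dynamic op is too large and has to be split
 * (the split descriptors themselves are then built by
 * qat_comp_build_multiple_requests()).
 */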
104 int
105 qat_comp_build_request(void *in_op, uint8_t *out_msg,
106 		       void *op_cookie,
107 		       enum qat_device_gen qat_dev_gen __rte_unused)
108 {
109 	struct rte_comp_op *op = in_op;
110 	struct qat_comp_op_cookie *cookie =
111 			(struct qat_comp_op_cookie *)op_cookie;
112 	struct qat_comp_stream *stream;
113 	struct qat_comp_xform *qat_xform;
114 	const uint8_t *tmpl;
115 	struct icp_qat_fw_comp_req *comp_req =
116 	    (struct icp_qat_fw_comp_req *)out_msg;
117 
118 	if (op->op_type == RTE_COMP_OP_STATEFUL) {
119 		stream = op->stream;
120 		qat_xform = &stream->qat_xform;
121 		if (unlikely(qat_xform->qat_comp_request_type !=
122 			     QAT_COMP_REQUEST_DECOMPRESS)) {
123 			QAT_DP_LOG(ERR, "QAT PMD does not support stateful compression");
124 			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
125 			return -EINVAL;
126 		}
127 		if (unlikely(stream->op_in_progress)) {
128 			QAT_DP_LOG(ERR, "QAT PMD does not support running multiple stateful operations on the same stream at once");
129 			op->status = RTE_COMP_OP_STATUS_INVALID_STATE;
130 			return -EINVAL;
131 		}
132 		stream->op_in_progress = 1;
133 	} else {
134 		stream = NULL;
135 		qat_xform = op->private_xform;
136 	}
137 	tmpl = (uint8_t *)&qat_xform->qat_comp_req_tmpl;
138 
139 	rte_mov128(out_msg, tmpl);
140 	comp_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
141 
142 	if (likely(qat_xform->qat_comp_request_type ==
143 			QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)) {
144 
145 		if (unlikely(op->src.length > QAT_FALLBACK_THLD)) {
146 			/* the operation must be split into pieces */
147 			if (qat_xform->checksum_type !=
148 					RTE_COMP_CHECKSUM_NONE) {
149 			/* fall back to fixed compression if any
150 				 * checksum calculation was requested
151 				 */
152 				qat_comp_fallback_to_fixed(comp_req);
153 			} else {
154 				/* calculate num. of descriptors for split op */
155 				unsigned int nb_descriptors_needed =
156 					op->src.length / QAT_FALLBACK_THLD + 1;
157 				/* allocate memzone for output data */
158 				if (qat_comp_allocate_split_op_memzones(
159 					       cookie, nb_descriptors_needed)) {
160 					/* out of memory, fallback to fixed */
161 					qat_comp_fallback_to_fixed(comp_req);
162 				} else {
163 					QAT_DP_LOG(DEBUG,
164 							"Input data is too big, op must be split into %u descriptors",
165 							nb_descriptors_needed);
166 					return (int) nb_descriptors_needed;
167 				}
168 			}
169 		}
170 
171 		/* set BFINAL bit according to flush_flag */
172 		comp_req->comp_pars.req_par_flags =
173 			ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
174 				ICP_QAT_FW_COMP_SOP,
175 				ICP_QAT_FW_COMP_EOP,
176 				op->flush_flag == RTE_COMP_FLUSH_FINAL ?
177 					ICP_QAT_FW_COMP_BFINAL
178 					: ICP_QAT_FW_COMP_NOT_BFINAL,
179 				ICP_QAT_FW_COMP_CNV,
180 				ICP_QAT_FW_COMP_CNV_RECOVERY);
181 
182 	} else if (op->op_type == RTE_COMP_OP_STATEFUL) {
183 
184 		comp_req->comp_pars.req_par_flags =
185 			ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
186 				(stream->start_of_packet) ?
187 					ICP_QAT_FW_COMP_SOP
188 				      : ICP_QAT_FW_COMP_NOT_SOP,
189 				(op->flush_flag == RTE_COMP_FLUSH_FULL ||
190 				 op->flush_flag == RTE_COMP_FLUSH_FINAL) ?
191 					ICP_QAT_FW_COMP_EOP
192 				      : ICP_QAT_FW_COMP_NOT_EOP,
193 				ICP_QAT_FW_COMP_NOT_BFINAL,
194 				ICP_QAT_FW_COMP_NO_CNV,
195 				ICP_QAT_FW_COMP_NO_CNV_RECOVERY);
196 	}
197 
198 	/* common for sgl and flat buffers */
199 	comp_req->comp_pars.comp_len = op->src.length;
200 	comp_req->comp_pars.out_buffer_sz = rte_pktmbuf_pkt_len(op->m_dst) -
201 			op->dst.offset;
202 
203 	if (op->m_src->next != NULL || op->m_dst->next != NULL) {
204 		/* sgl */
205 		int ret = 0;
206 
207 		ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
208 				QAT_COMN_PTR_TYPE_SGL);
209 
210 		if (unlikely(op->m_src->nb_segs > cookie->src_nb_elems)) {
211 			/* we need to allocate more elements in the SGL */
212 			void *tmp;
213 
214 			tmp = rte_realloc_socket(cookie->qat_sgl_src_d,
215 					  sizeof(struct qat_sgl) +
216 					  sizeof(struct qat_flat_buf) *
217 					  op->m_src->nb_segs, 64,
218 					  cookie->socket_id);
219 
220 			if (unlikely(tmp == NULL)) {
221 				QAT_DP_LOG(ERR, "QAT PMD can't allocate memory"
222 					   " for %d elements of SGL",
223 					   op->m_src->nb_segs);
224 				op->status = RTE_COMP_OP_STATUS_ERROR;
225 				/* clear op-in-progress flag */
226 				if (stream)
227 					stream->op_in_progress = 0;
228 				return -ENOMEM;
229 			}
230 			/* new SGL is valid now */
231 			cookie->qat_sgl_src_d = (struct qat_sgl *)tmp;
232 			cookie->src_nb_elems = op->m_src->nb_segs;
233 			cookie->qat_sgl_src_phys_addr =
234 				rte_malloc_virt2iova(cookie->qat_sgl_src_d);
235 		}
236 
237 		ret = qat_sgl_fill_array(op->m_src,
238 				op->src.offset,
239 				cookie->qat_sgl_src_d,
240 				op->src.length,
241 				cookie->src_nb_elems);
242 		if (ret) {
243 			QAT_DP_LOG(ERR, "QAT PMD Cannot fill source sgl array");
244 			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
245 			/* clear op-in-progress flag */
246 			if (stream)
247 				stream->op_in_progress = 0;
248 			return ret;
249 		}
250 
251 		if (unlikely(op->m_dst->nb_segs > cookie->dst_nb_elems)) {
252 			/* we need to allocate more elements in the SGL */
253 			struct qat_sgl *tmp;
254 
255 			tmp = rte_realloc_socket(cookie->qat_sgl_dst_d,
256 					  sizeof(struct qat_sgl) +
257 					  sizeof(struct qat_flat_buf) *
258 					  op->m_dst->nb_segs, 64,
259 					  cookie->socket_id);
260 
261 			if (unlikely(tmp == NULL)) {
262 				QAT_DP_LOG(ERR, "QAT PMD can't allocate memory"
263 					   " for %d elements of SGL",
264 					   op->m_dst->nb_segs);
265 				op->status = RTE_COMP_OP_STATUS_ERROR;
266 				/* clear op-in-progress flag */
267 				if (stream)
268 					stream->op_in_progress = 0;
269 				return -ENOMEM;
270 			}
271 			/* new SGL is valid now */
272 			cookie->qat_sgl_dst_d = (struct qat_sgl *)tmp;
273 			cookie->dst_nb_elems = op->m_dst->nb_segs;
274 			cookie->qat_sgl_dst_phys_addr =
275 				rte_malloc_virt2iova(cookie->qat_sgl_dst_d);
276 		}
277 
278 		ret = qat_sgl_fill_array(op->m_dst,
279 				op->dst.offset,
280 				cookie->qat_sgl_dst_d,
281 				comp_req->comp_pars.out_buffer_sz,
282 				cookie->dst_nb_elems);
283 		if (ret) {
284 			QAT_DP_LOG(ERR, "QAT PMD Cannot fill dest. sgl array");
285 			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
286 			/* clear op-in-progress flag */
287 			if (stream)
288 				stream->op_in_progress = 0;
289 			return ret;
290 		}
291 
292 		comp_req->comn_mid.src_data_addr =
293 				cookie->qat_sgl_src_phys_addr;
294 		comp_req->comn_mid.dest_data_addr =
295 				cookie->qat_sgl_dst_phys_addr;
296 		comp_req->comn_mid.src_length = 0;
297 		comp_req->comn_mid.dst_length = 0;
298 
299 	} else {
300 		/* flat aka linear buffer */
301 		ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
302 				QAT_COMN_PTR_TYPE_FLAT);
303 		comp_req->comn_mid.src_length = op->src.length;
304 		comp_req->comn_mid.dst_length =
305 				comp_req->comp_pars.out_buffer_sz;
306 
307 		comp_req->comn_mid.src_data_addr =
308 		    rte_pktmbuf_iova_offset(op->m_src, op->src.offset);
309 		comp_req->comn_mid.dest_data_addr =
310 		    rte_pktmbuf_iova_offset(op->m_dst, op->dst.offset);
311 	}
312 
313 	if (unlikely(rte_pktmbuf_pkt_len(op->m_dst) < QAT_MIN_OUT_BUF_SIZE)) {
314 		/* QAT doesn't support destination buffers smaller
315 		 * than QAT_MIN_OUT_BUF_SIZE. Propagate the error by
316 		 * converting this request into a null request and
317 		 * checking the status in the response.
318 		 */
319 		QAT_DP_LOG(WARNING, "QAT destination buffer too small - resend with larger buffer");
320 		comp_req->comn_hdr.service_type = ICP_QAT_FW_COMN_REQ_NULL;
321 		comp_req->comn_hdr.service_cmd_id = ICP_QAT_FW_NULL_REQ_SERV_ID;
322 		cookie->error = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
323 	}
324 
325 #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
326 	QAT_DP_LOG(DEBUG, "Direction: %s",
327 	    qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
328 			    "decompression" : "compression");
329 	QAT_DP_HEXDUMP_LOG(DEBUG, "qat compression message:", comp_req,
330 		    sizeof(struct icp_qat_fw_comp_req));
331 #endif
332 	return 0;
333 }
334 
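/* Mask-based modulo for ring offsets (assumes a power-of-two ring size). */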
335 static inline uint32_t adf_modulo(uint32_t data, uint32_t modulo_mask)
336 {
337 	return data & modulo_mask;
338 }
339 
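/*
 * Advance the (mbuf, offset) position by len bytes, walking the mbuf chain
 * until the segment containing the new position is reached.
 */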
340 static inline void
341 qat_comp_mbuf_skip(struct rte_mbuf **mbuf, uint32_t *offset, uint32_t len)
342 {
343 	while (*offset + len >= rte_pktmbuf_data_len(*mbuf)) {
344 		len -= (rte_pktmbuf_data_len(*mbuf) - *offset);
345 		*mbuf = (*mbuf)->next;
346 		*offset = 0;
347 	}
348 	*offset = len;
349 }
350 
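/*
 * Build the parent descriptor plus child descriptors for an op whose source
 * exceeds QAT_FALLBACK_THLD. The parent consumes the first QAT_FALLBACK_THLD
 * bytes; each child consumes up to QAT_FALLBACK_THLD bytes and writes its
 * output into its own intermediate memzone, to be copied back into the real
 * destination mbuf when the responses are processed. Returns the number of
 * descriptors built, or a negative errno on failure.
 */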
351 int
352 qat_comp_build_multiple_requests(void *in_op, struct qat_qp *qp,
353 				 uint32_t parent_tail, int nb_descr)
354 {
355 	struct rte_comp_op op_backup;
356 	struct rte_mbuf dst_mbuf;
357 	struct rte_comp_op *op = in_op;
358 	struct qat_queue *txq = &(qp->tx_q);
359 	uint8_t *base_addr = (uint8_t *)txq->base_addr;
360 	uint8_t *out_msg = base_addr + parent_tail;
361 	uint32_t tail = parent_tail;
362 	struct icp_qat_fw_comp_req *comp_req =
363 			(struct icp_qat_fw_comp_req *)out_msg;
364 	struct qat_comp_op_cookie *parent_cookie =
365 			(struct qat_comp_op_cookie *)
366 			qp->op_cookies[parent_tail / txq->msg_size];
367 	struct qat_comp_op_cookie *child_cookie;
368 	uint16_t dst_data_size =
369 			RTE_MIN(RTE_PMD_QAT_COMP_IM_BUFFER_SIZE, 65535);
370 	uint32_t data_to_enqueue = op->src.length - QAT_FALLBACK_THLD;
371 	int num_descriptors_built = 1;
372 	int ret;
373 
374 	QAT_DP_LOG(DEBUG, "op %p, parent_cookie %p", op, parent_cookie);
375 
376 	/* back up the original op so it can be restored later */
377 	rte_memcpy(&op_backup, op, sizeof(op_backup));
378 
379 	parent_cookie->nb_child_responses = 0;
380 	parent_cookie->nb_children = 0;
381 	parent_cookie->split_op = 1;
382 	parent_cookie->dst_data = op->m_dst;
383 	parent_cookie->dst_data_offset = op->dst.offset;
384 
385 	op->src.length = QAT_FALLBACK_THLD;
386 	op->flush_flag = RTE_COMP_FLUSH_FULL;
387 
388 	QAT_DP_LOG(DEBUG, "parent op src len %u dst len %u",
389 			op->src.length, op->m_dst->pkt_len);
390 
391 	ret = qat_comp_build_request(in_op, out_msg, parent_cookie,
392 			qp->qat_dev_gen);
393 	if (ret != 0) {
394 		/* restore op and clear cookie */
395 		QAT_DP_LOG(WARNING, "Failed to build parent descriptor");
396 		op->src.length = op_backup.src.length;
397 		op->flush_flag = op_backup.flush_flag;
398 		parent_cookie->split_op = 0;
399 		return ret;
400 	}
401 
402 	/* prepare local dst mbuf */
403 	rte_memcpy(&dst_mbuf, op->m_dst, sizeof(dst_mbuf));
404 	rte_pktmbuf_reset(&dst_mbuf);
405 	dst_mbuf.buf_len = dst_data_size;
406 	dst_mbuf.data_len = dst_data_size;
407 	dst_mbuf.pkt_len = dst_data_size;
408 	dst_mbuf.data_off = 0;
409 
410 	/* update op for the child operations */
411 	op->m_dst = &dst_mbuf;
412 	op->dst.offset = 0;
413 
414 	while (data_to_enqueue) {
415 		const struct rte_memzone *mz =
416 			parent_cookie->dst_memzones[num_descriptors_built - 1];
417 		uint32_t src_data_size = RTE_MIN(data_to_enqueue,
418 				QAT_FALLBACK_THLD);
419 		uint32_t cookie_index;
420 
421 		/* update params for the next op */
422 		op->src.offset += QAT_FALLBACK_THLD;
423 		op->src.length = src_data_size;
424 		op->flush_flag = (src_data_size == data_to_enqueue) ?
425 			op_backup.flush_flag : RTE_COMP_FLUSH_FULL;
426 
427 		/* update dst mbuf for the next op (use memzone for dst data) */
428 		dst_mbuf.buf_addr = mz->addr;
429 		dst_mbuf.buf_iova = mz->iova;
430 
431 		/* move the tail and calculate next cookie index */
432 		tail = adf_modulo(tail + txq->msg_size, txq->modulo_mask);
433 		cookie_index = tail / txq->msg_size;
434 		child_cookie = (struct qat_comp_op_cookie *)
435 				qp->op_cookies[cookie_index];
436 		comp_req = (struct icp_qat_fw_comp_req *)(base_addr + tail);
437 
438 		/* update child cookie */
439 		child_cookie->split_op = 1; /* must be set for child as well */
440 		child_cookie->parent_cookie = parent_cookie; /* same as above */
441 		child_cookie->nb_children = 0;
442 		child_cookie->dest_buffer = mz->addr;
443 
444 		QAT_DP_LOG(DEBUG,
445 				"cookie_index %u, child_cookie %p, comp_req %p",
446 				cookie_index, child_cookie, comp_req);
447 		QAT_DP_LOG(DEBUG,
448 				"data_to_enqueue %u, num_descriptors_built %d",
449 				data_to_enqueue, num_descriptors_built);
450 		QAT_DP_LOG(DEBUG, "child op src len %u dst len %u",
451 				op->src.length, op->m_dst->pkt_len);
452 
453 		/* build the request */
454 		ret = qat_comp_build_request(op, (uint8_t *)comp_req,
455 				child_cookie, qp->qat_dev_gen);
456 		if (ret < 0) {
457 			QAT_DP_LOG(WARNING, "Failed to build child descriptor");
458 			/* restore op and clear cookie */
459 			rte_memcpy(op, &op_backup, sizeof(op_backup));
460 			parent_cookie->split_op = 0;
461 			parent_cookie->nb_children = 0;
462 			return ret;
463 		}
464 
465 		data_to_enqueue -= src_data_size;
466 		num_descriptors_built++;
467 	}
468 
469 	/* restore backed up original op */
470 	rte_memcpy(op, &op_backup, sizeof(op_backup));
471 
472 	if (nb_descr != num_descriptors_built)
473 		QAT_DP_LOG(ERR, "split op: expected %d descriptors, built %d",
474 				nb_descr, num_descriptors_built);
475 
476 	parent_cookie->nb_children = num_descriptors_built - 1;
477 	return num_descriptors_built;
478 }
479 
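/*
 * Copy a child op's output from its intermediate memzone
 * (cookie->dest_buffer) into the parent's destination mbuf chain at the
 * parent's current (dst_data, dst_data_offset) position, and advance that
 * position for the next child.
 */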
480 static inline void
481 qat_comp_response_data_copy(struct qat_comp_op_cookie *cookie,
482 		       struct rte_comp_op *rx_op)
483 {
484 	struct qat_comp_op_cookie *pc = cookie->parent_cookie;
485 	struct rte_mbuf *sgl_buf = pc->dst_data;
486 	void *op_dst_addr = rte_pktmbuf_mtod_offset(sgl_buf, uint8_t *,
487 						    pc->dst_data_offset);
488 
489 	/* number of bytes left in the current segment */
490 	uint32_t left_in_current = rte_pktmbuf_data_len(sgl_buf) -
491 			pc->dst_data_offset;
492 
493 	uint32_t prod, sent;
494 
495 	if (rx_op->produced <= left_in_current) {
496 		rte_memcpy(op_dst_addr, cookie->dest_buffer,
497 				rx_op->produced);
498 		/* calculate dst mbuf and offset for the next child op */
499 		if (rx_op->produced == left_in_current) {
500 			pc->dst_data = sgl_buf->next;
501 			pc->dst_data_offset = 0;
502 		} else
503 			pc->dst_data_offset += rx_op->produced;
504 	} else {
505 		rte_memcpy(op_dst_addr, cookie->dest_buffer,
506 				left_in_current);
507 		sgl_buf = sgl_buf->next;
508 		prod = rx_op->produced - left_in_current;
509 		sent = left_in_current;
510 		while (prod > rte_pktmbuf_data_len(sgl_buf)) {
511 			op_dst_addr = rte_pktmbuf_mtod_offset(sgl_buf,
512 					uint8_t *, 0);
513 
514 			rte_memcpy(op_dst_addr,
515 					((uint8_t *)cookie->dest_buffer) +
516 					sent,
517 					rte_pktmbuf_data_len(sgl_buf));
518 
519 			prod -= rte_pktmbuf_data_len(sgl_buf);
520 			sent += rte_pktmbuf_data_len(sgl_buf);
521 
522 			sgl_buf = sgl_buf->next;
523 		}
524 
525 		op_dst_addr = rte_pktmbuf_mtod_offset(sgl_buf, uint8_t *, 0);
526 
527 		rte_memcpy(op_dst_addr,
528 				((uint8_t *)cookie->dest_buffer) + sent,
529 				prod);
530 
531 		/* calculate dst mbuf and offset for the next child op */
532 		if (prod == rte_pktmbuf_data_len(sgl_buf)) {
533 			pc->dst_data = sgl_buf->next;
534 			pc->dst_data_offset = 0;
535 		} else {
536 			pc->dst_data = sgl_buf;
537 			pc->dst_data_offset = prod;
538 		}
539 	}
540 }
541 
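/*
 * Process one firmware response. Returns 1 when a regular op is returned in
 * *op, 0 when a split-op response was absorbed but the parent op is not yet
 * complete, and nb_children + 1 (all descriptors of the split op, parent
 * included) when the final child response arrives and the reassembled
 * parent op is returned in *op.
 */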
542 int
543 qat_comp_process_response(void **op, uint8_t *resp, void *op_cookie,
544 			  uint64_t *dequeue_err_count)
545 {
546 	struct icp_qat_fw_comp_resp *resp_msg =
547 			(struct icp_qat_fw_comp_resp *)resp;
548 	struct qat_comp_op_cookie *cookie =
549 			(struct qat_comp_op_cookie *)op_cookie;
550 
551 	struct icp_qat_fw_resp_comp_pars *comp_resp1 =
552 	  (struct icp_qat_fw_resp_comp_pars *)&resp_msg->comp_resp_pars;
553 
554 	QAT_DP_LOG(DEBUG, "input counter = %u, output counter = %u",
555 		   comp_resp1->input_byte_counter,
556 		   comp_resp1->output_byte_counter);
557 
558 	struct rte_comp_op *rx_op = (struct rte_comp_op *)(uintptr_t)
559 			(resp_msg->opaque_data);
560 	struct qat_comp_stream *stream;
561 	struct qat_comp_xform *qat_xform;
562 	int err = resp_msg->comn_resp.comn_status &
563 			((1 << QAT_COMN_RESP_CMP_STATUS_BITPOS) |
564 			 (1 << QAT_COMN_RESP_XLAT_STATUS_BITPOS));
565 
566 	if (rx_op->op_type == RTE_COMP_OP_STATEFUL) {
567 		stream = rx_op->stream;
568 		qat_xform = &stream->qat_xform;
569 		/* clear op-in-progress flag */
570 		stream->op_in_progress = 0;
571 	} else {
572 		stream = NULL;
573 		qat_xform = rx_op->private_xform;
574 	}
575 
576 #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
577 	QAT_DP_LOG(DEBUG, "Direction: %s",
578 	    qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
579 	    "decompression" : "compression");
580 	QAT_DP_HEXDUMP_LOG(DEBUG,  "qat_response:", (uint8_t *)resp_msg,
581 			sizeof(struct icp_qat_fw_comp_resp));
582 #endif
583 
584 	if (unlikely(cookie->error)) {
585 		rx_op->status = cookie->error;
586 		cookie->error = 0;
587 		++(*dequeue_err_count);
588 		rx_op->debug_status = 0;
589 		rx_op->consumed = 0;
590 		rx_op->produced = 0;
591 		*op = (void *)rx_op;
592 		/* also in this case the number of returned ops
593 		 * must be equal to one, and the appropriate
594 		 * (error) status must be set as well */
595 		return 1;
596 	}
597 
598 	if (likely(qat_xform->qat_comp_request_type
599 			!= QAT_COMP_REQUEST_DECOMPRESS)) {
600 		if (unlikely(ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(
601 				resp_msg->comn_resp.hdr_flags)
602 					== ICP_QAT_FW_COMP_NO_CNV)) {
603 			rx_op->status = RTE_COMP_OP_STATUS_ERROR;
604 			rx_op->debug_status = ERR_CODE_QAT_COMP_WRONG_FW;
605 			*op = (void *)rx_op;
606 			QAT_DP_LOG(ERR, "QAT has wrong firmware");
607 			++(*dequeue_err_count);
608 			return 1;
609 		}
610 	}
611 
612 	if (err) {
613 		if (unlikely((err & (1 << QAT_COMN_RESP_XLAT_STATUS_BITPOS))
614 			     &&	(qat_xform->qat_comp_request_type
615 				 == QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS))) {
616 			QAT_DP_LOG(ERR, "QAT intermediate buffer may be too "
617 			    "small for output, try configuring a larger size");
618 		}
619 
620 		int8_t cmp_err_code =
621 			(int8_t)resp_msg->comn_resp.comn_error.cmp_err_code;
622 		int8_t xlat_err_code =
623 			(int8_t)resp_msg->comn_resp.comn_error.xlat_err_code;
624 
625 		/* handle recoverable out-of-buffer condition in stateful
626 		 * decompression scenario
627 		 */
628 		if (cmp_err_code == ERR_CODE_OVERFLOW_ERROR && !xlat_err_code
629 				&& qat_xform->qat_comp_request_type
630 					== QAT_COMP_REQUEST_DECOMPRESS
631 				&& rx_op->op_type == RTE_COMP_OP_STATEFUL) {
632 			struct icp_qat_fw_resp_comp_pars *comp_resp =
633 					&resp_msg->comp_resp_pars;
634 			rx_op->status =
635 				RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE;
636 			rx_op->consumed = comp_resp->input_byte_counter;
637 			rx_op->produced = comp_resp->output_byte_counter;
638 			stream->start_of_packet = 0;
639 		} else if ((cmp_err_code == ERR_CODE_OVERFLOW_ERROR
640 			  && !xlat_err_code)
641 				||
642 		    (!cmp_err_code && xlat_err_code == ERR_CODE_OVERFLOW_ERROR)
643 				||
644 		    (cmp_err_code == ERR_CODE_OVERFLOW_ERROR &&
645 		     xlat_err_code == ERR_CODE_OVERFLOW_ERROR)){
646 
647 			struct icp_qat_fw_resp_comp_pars *comp_resp =
648 					(struct icp_qat_fw_resp_comp_pars *)
649 					&resp_msg->comp_resp_pars;
650 
651 			/* handle recoverable out-of-buffer condition
652 			 * in stateless compression scenario
653 			 */
654 			if (comp_resp->input_byte_counter) {
655 				if ((qat_xform->qat_comp_request_type
656 				== QAT_COMP_REQUEST_FIXED_COMP_STATELESS) ||
657 				    (qat_xform->qat_comp_request_type
658 				== QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)) {
659 
660 					rx_op->status =
661 				RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE;
662 					rx_op->consumed =
663 						comp_resp->input_byte_counter;
664 					rx_op->produced =
665 						comp_resp->output_byte_counter;
666 				} else
667 					rx_op->status =
668 				RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
669 			} else
670 				rx_op->status =
671 				RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
672 		} else
673 			rx_op->status = RTE_COMP_OP_STATUS_ERROR;
674 
675 		++(*dequeue_err_count);
676 		rx_op->debug_status =
677 			*((uint16_t *)(&resp_msg->comn_resp.comn_error));
678 	} else {
679 		struct icp_qat_fw_resp_comp_pars *comp_resp =
680 		  (struct icp_qat_fw_resp_comp_pars *)&resp_msg->comp_resp_pars;
681 
682 		rx_op->status = RTE_COMP_OP_STATUS_SUCCESS;
683 		rx_op->consumed = comp_resp->input_byte_counter;
684 		rx_op->produced = comp_resp->output_byte_counter;
685 		if (stream)
686 			stream->start_of_packet = 0;
687 
688 		if (qat_xform->checksum_type != RTE_COMP_CHECKSUM_NONE) {
689 			if (qat_xform->checksum_type == RTE_COMP_CHECKSUM_CRC32)
690 				rx_op->output_chksum = comp_resp->curr_crc32;
691 			else if (qat_xform->checksum_type ==
692 					RTE_COMP_CHECKSUM_ADLER32)
693 				rx_op->output_chksum = comp_resp->curr_adler_32;
694 			else
695 				rx_op->output_chksum = comp_resp->curr_chksum;
696 		}
697 	}
698 	QAT_DP_LOG(DEBUG, "About to check for split op, cookies: %p %p, split: %u",
699 		cookie, cookie->parent_cookie, cookie->split_op);
700 
701 	if (cookie->split_op) {
702 		*op = NULL;
703 		struct qat_comp_op_cookie *pc = cookie->parent_cookie;
704 
705 		if (cookie->nb_children > 0) {
706 			QAT_DP_LOG(DEBUG, "Parent");
707 			/* parent - don't return until all child
708 			 * responses are collected
709 			 */
710 			cookie->total_consumed = rx_op->consumed;
711 			cookie->total_produced = rx_op->produced;
712 			if (err) {
713 				cookie->error = rx_op->status;
714 				rx_op->status = RTE_COMP_OP_STATUS_SUCCESS;
715 			} else {
716 				/* calculate dst mbuf and offset for child op */
717 				qat_comp_mbuf_skip(&cookie->dst_data,
718 						&cookie->dst_data_offset,
719 						rx_op->produced);
720 			}
721 		} else {
722 			QAT_DP_LOG(DEBUG, "Child");
723 			if (pc->error == RTE_COMP_OP_STATUS_SUCCESS) {
724 				if (err)
725 					pc->error = rx_op->status;
726 				if (rx_op->produced) {
727 					/* this covers both SUCCESS and
728 					 * OUT_OF_SPACE_RECOVERABLE cases
729 					 */
730 					qat_comp_response_data_copy(cookie,
731 							rx_op);
732 					pc->total_consumed += rx_op->consumed;
733 					pc->total_produced += rx_op->produced;
734 				}
735 			}
736 			rx_op->status = RTE_COMP_OP_STATUS_SUCCESS;
737 
738 			pc->nb_child_responses++;
739 
740 			/* (child) cookie fields have to be reset
741 			 * to avoid problems with reusability - the rx
742 			 * and tx queues start from index zero
743 			 */
744 			cookie->nb_children = 0;
745 			cookie->split_op = 0;
746 			cookie->nb_child_responses = 0;
747 			cookie->dest_buffer = NULL;
748 
749 			if (pc->nb_child_responses == pc->nb_children) {
750 				uint8_t child_resp;
751 
752 				/* parent should be included as well */
753 				child_resp = pc->nb_child_responses + 1;
754 
755 				rx_op->status = pc->error;
756 				rx_op->consumed = pc->total_consumed;
757 				rx_op->produced = pc->total_produced;
758 				*op = (void *)rx_op;
759 
760 				/* free memzones used for dst data */
761 				qat_comp_free_split_op_memzones(pc,
762 						pc->nb_children);
763 
764 				/* (parent) cookie fields have to be reset
765 				 * to avoid problems with reusability - the
766 				 * rx and tx queues start from index zero
767 				 */
768 				pc->nb_children = 0;
769 				pc->split_op = 0;
770 				pc->nb_child_responses = 0;
771 				pc->error = RTE_COMP_OP_STATUS_SUCCESS;
772 
773 				return child_resp;
774 			}
775 		}
776 		return 0;
777 	}
778 
779 	*op = (void *)rx_op;
780 	return 1;
781 }
782 
783 unsigned int
784 qat_comp_xform_size(void)
785 {
786 	return RTE_ALIGN_CEIL(sizeof(struct qat_comp_xform), 8);
787 }
788 
789 unsigned int
790 qat_comp_stream_size(void)
791 {
792 	return RTE_ALIGN_CEIL(sizeof(struct qat_comp_stream), 8);
793 }
794 
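/*
 * Fill in the common request header: service command id (static, dynamic or
 * decompress), service type, header flags and the default flat-pointer
 * common request flags.
 */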
795 static void qat_comp_create_req_hdr(struct icp_qat_fw_comn_req_hdr *header,
796 				    enum qat_comp_request_type request)
797 {
798 	if (request == QAT_COMP_REQUEST_FIXED_COMP_STATELESS)
799 		header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
800 	else if (request == QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)
801 		header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC;
802 	else if (request == QAT_COMP_REQUEST_DECOMPRESS)
803 		header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
804 
805 	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
806 	header->hdr_flags =
807 	    ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
808 
809 	header->comn_req_flags = ICP_QAT_FW_COMN_FLAGS_BUILD(
810 	    QAT_COMN_CD_FLD_TYPE_16BYTE_DATA, QAT_COMN_PTR_TYPE_FLAT);
811 }
812 
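/*
 * Build the firmware request template kept in the xform (and, for stateful
 * decompression, in the stream): common header, session flags, compression
 * slice configuration (direction, algorithm, depth) and slice chaining -
 * COMP -> DRAM for fixed compression and decompression, or
 * COMP -> XLAT -> DRAM with an intermediate buffer for dynamic compression.
 */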
813 static int qat_comp_create_templates(struct qat_comp_xform *qat_xform,
814 			const struct rte_memzone *interm_buff_mz,
815 			const struct rte_comp_xform *xform,
816 			const struct qat_comp_stream *stream,
817 			enum rte_comp_op_type op_type)
818 {
819 	struct icp_qat_fw_comp_req *comp_req;
820 	int comp_level, algo;
821 	uint32_t req_par_flags;
822 	int direction = ICP_QAT_HW_COMPRESSION_DIR_COMPRESS;
823 
824 	if (unlikely(qat_xform == NULL)) {
825 		QAT_LOG(ERR, "Session was not created for this device");
826 		return -EINVAL;
827 	}
828 
829 	if (op_type == RTE_COMP_OP_STATEFUL) {
830 		if (unlikely(stream == NULL)) {
831 			QAT_LOG(ERR, "Stream must be non-null for stateful op");
832 			return -EINVAL;
833 		}
834 		if (unlikely(qat_xform->qat_comp_request_type !=
835 			     QAT_COMP_REQUEST_DECOMPRESS)) {
836 			QAT_LOG(ERR, "QAT PMD does not support stateful compression");
837 			return -ENOTSUP;
838 		}
839 	}
840 
841 	if (qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) {
842 		direction = ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS;
843 		comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_1;
844 		req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
845 				ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP,
846 				ICP_QAT_FW_COMP_BFINAL,
847 				ICP_QAT_FW_COMP_CNV,
848 				ICP_QAT_FW_COMP_CNV_RECOVERY);
849 	} else {
850 		if (xform->compress.level == RTE_COMP_LEVEL_PMD_DEFAULT)
851 			comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_8;
852 		else if (xform->compress.level == 1)
853 			comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_1;
854 		else if (xform->compress.level == 2)
855 			comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_4;
856 		else if (xform->compress.level == 3)
857 			comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_8;
858 		else if (xform->compress.level >= 4 &&
859 			 xform->compress.level <= 9)
860 			comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_16;
861 		else {
862 			QAT_LOG(ERR, "compression level not supported");
863 			return -EINVAL;
864 		}
865 		req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
866 				ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP,
867 				ICP_QAT_FW_COMP_BFINAL, ICP_QAT_FW_COMP_CNV,
868 				ICP_QAT_FW_COMP_CNV_RECOVERY);
869 	}
870 
871 	switch (xform->compress.algo) {
872 	case RTE_COMP_ALGO_DEFLATE:
873 		algo = ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE;
874 		break;
875 	case RTE_COMP_ALGO_LZS:
876 	default:
877 		/* RTE_COMP_NULL */
878 		QAT_LOG(ERR, "compression algorithm not supported");
879 		return -EINVAL;
880 	}
881 
882 	comp_req = &qat_xform->qat_comp_req_tmpl;
883 
884 	/* Initialize header */
885 	qat_comp_create_req_hdr(&comp_req->comn_hdr,
886 					qat_xform->qat_comp_request_type);
887 
888 	if (op_type == RTE_COMP_OP_STATEFUL) {
889 		comp_req->comn_hdr.serv_specif_flags =
890 				ICP_QAT_FW_COMP_FLAGS_BUILD(
891 			ICP_QAT_FW_COMP_STATEFUL_SESSION,
892 			ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
893 			ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
894 			ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
895 			ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
896 
897 		/* Decompression state registers */
898 		comp_req->comp_cd_ctrl.comp_state_addr =
899 				stream->state_registers_decomp_phys;
900 
901 		/* Enable A, B, C, D, and E (CAMs). */
902 		comp_req->comp_cd_ctrl.ram_bank_flags =
903 			ICP_QAT_FW_COMP_RAM_FLAGS_BUILD(
904 				ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank I */
905 				ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank H */
906 				ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank G */
907 				ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank F */
908 				ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank E */
909 				ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank D */
910 				ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank C */
911 				ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank B */
912 				ICP_QAT_FW_COMP_BANK_ENABLED); /* Bank A */
913 
914 		comp_req->comp_cd_ctrl.ram_banks_addr =
915 				stream->inflate_context_phys;
916 	} else {
917 		comp_req->comn_hdr.serv_specif_flags =
918 				ICP_QAT_FW_COMP_FLAGS_BUILD(
919 			ICP_QAT_FW_COMP_STATELESS_SESSION,
920 			ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
921 			ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
922 			ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
923 			ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
924 	}
925 
926 	comp_req->cd_pars.sl.comp_slice_cfg_word[0] =
927 	    ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(
928 		direction,
929 		/* In CPM 1.6 this is the only valid mode! */
930 		ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED, algo,
931 		/* Translate level to depth */
932 		comp_level, ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
933 
934 	comp_req->comp_pars.initial_adler = 1;
935 	comp_req->comp_pars.initial_crc32 = 0;
936 	comp_req->comp_pars.req_par_flags = req_par_flags;
937 
938 
939 	if (qat_xform->qat_comp_request_type ==
940 			QAT_COMP_REQUEST_FIXED_COMP_STATELESS ||
941 	    qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) {
942 		ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl,
943 					    ICP_QAT_FW_SLICE_DRAM_WR);
944 		ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->comp_cd_ctrl,
945 					    ICP_QAT_FW_SLICE_COMP);
946 	} else if (qat_xform->qat_comp_request_type ==
947 			QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS) {
948 
949 		ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl,
950 				ICP_QAT_FW_SLICE_XLAT);
951 		ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->comp_cd_ctrl,
952 				ICP_QAT_FW_SLICE_COMP);
953 
954 		ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->u2.xlt_cd_ctrl,
955 				ICP_QAT_FW_SLICE_DRAM_WR);
956 		ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->u2.xlt_cd_ctrl,
957 				ICP_QAT_FW_SLICE_XLAT);
958 
959 		comp_req->u1.xlt_pars.inter_buff_ptr =
960 				interm_buff_mz->iova;
961 	}
962 
963 #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
964 	QAT_DP_HEXDUMP_LOG(DEBUG, "qat compression message template:", comp_req,
965 		    sizeof(struct icp_qat_fw_comp_req));
966 #endif
967 	return 0;
968 }
969 
970 /**
971  * Create driver private_xform data.
972  *
973  * @param dev
974  *   Compressdev device
975  * @param xform
976  *   xform data from application
977  * @param private_xform
978  *   ptr where handle of pmd's private_xform data should be stored
979  * @return
980  *  - Returns 0 if successful
981  *    and a valid private_xform handle
982  *  - <0 in error cases
983  *  - Returns -EINVAL if input parameters are invalid.
984  *  - Returns -ENOTSUP if comp device does not support the comp transform.
985  *  - Returns -ENOMEM if the private_xform could not be allocated.
986  */
987 int
988 qat_comp_private_xform_create(struct rte_compressdev *dev,
989 			      const struct rte_comp_xform *xform,
990 			      void **private_xform)
991 {
992 	struct qat_comp_dev_private *qat = dev->data->dev_private;
993 
994 	if (unlikely(private_xform == NULL)) {
995 		QAT_LOG(ERR, "QAT: private_xform parameter is NULL");
996 		return -EINVAL;
997 	}
998 	if (unlikely(qat->xformpool == NULL)) {
999 		QAT_LOG(ERR, "QAT device has no private_xform mempool");
1000 		return -ENOMEM;
1001 	}
1002 	if (rte_mempool_get(qat->xformpool, private_xform)) {
1003 		QAT_LOG(ERR, "Couldn't get object from qat xform mempool");
1004 		return -ENOMEM;
1005 	}
1006 
1007 	struct qat_comp_xform *qat_xform =
1008 			(struct qat_comp_xform *)*private_xform;
1009 
1010 	if (xform->type == RTE_COMP_COMPRESS) {
1011 
1012 		if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED ||
1013 		  ((xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_DEFAULT)
1014 				   && qat->interm_buff_mz == NULL))
1015 			qat_xform->qat_comp_request_type =
1016 					QAT_COMP_REQUEST_FIXED_COMP_STATELESS;
1017 
1018 		else if ((xform->compress.deflate.huffman ==
1019 				RTE_COMP_HUFFMAN_DYNAMIC ||
1020 				xform->compress.deflate.huffman ==
1021 						RTE_COMP_HUFFMAN_DEFAULT) &&
1022 				qat->interm_buff_mz != NULL)
1023 
1024 			qat_xform->qat_comp_request_type =
1025 					QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS;
1026 
1027 		else {
1028 			QAT_LOG(ERR,
1029 					"IM buffers needed for dynamic deflate. Set size in config file");
1030 			return -EINVAL;
1031 		}
1032 
1033 		qat_xform->checksum_type = xform->compress.chksum;
1034 
1035 	} else {
1036 		qat_xform->qat_comp_request_type = QAT_COMP_REQUEST_DECOMPRESS;
1037 		qat_xform->checksum_type = xform->decompress.chksum;
1038 	}
1039 
1040 	if (qat_comp_create_templates(qat_xform, qat->interm_buff_mz, xform,
1041 				      NULL, RTE_COMP_OP_STATELESS)) {
1042 		QAT_LOG(ERR, "QAT: Problem with setting compression");
1043 		return -EINVAL;
1044 	}
1045 	return 0;
1046 }
1047 
1048 /**
1049  * Free driver private_xform data.
1050  *
1051  * @param dev
1052  *   Compressdev device
1053  * @param private_xform
1054  *   handle of pmd's private_xform data
1055  * @return
1056  *  - 0 if successful
1057  *  - <0 in error cases
1058  *  - Returns -EINVAL if input parameters are invalid.
1059  */
1060 int
1061 qat_comp_private_xform_free(struct rte_compressdev *dev __rte_unused,
1062 			    void *private_xform)
1063 {
1064 	struct qat_comp_xform *qat_xform =
1065 			(struct qat_comp_xform *)private_xform;
1066 
1067 	if (qat_xform) {
1068 		memset(qat_xform, 0, qat_comp_xform_size());
1069 		struct rte_mempool *mp = rte_mempool_from_obj(qat_xform);
1070 
1071 		rte_mempool_put(mp, qat_xform);
1072 		return 0;
1073 	}
1074 	return -EINVAL;
1075 }
1076 
1077 /**
1078  * Reset stream state for the next use.
1079  *
1080  * @param stream
1081  *   handle of pmd's private stream data
1082  */
1083 static void
1084 qat_comp_stream_reset(struct qat_comp_stream *stream)
1085 {
1086 	if (stream) {
1087 		memset(&stream->qat_xform, 0, sizeof(struct qat_comp_xform));
1088 		stream->start_of_packet = 1;
1089 		stream->op_in_progress = 0;
1090 	}
1091 }
1092 
1093 /**
1094  * Create driver private stream data.
1095  *
1096  * @param dev
1097  *   Compressdev device
1098  * @param xform
1099  *   xform data
1100  * @param stream
1101  *   ptr where handle of pmd's private stream data should be stored
1102  * @return
1103  *  - Returns 0 if private stream structure has been created successfully.
1104  *  - Returns -EINVAL if input parameters are invalid.
1105  *  - Returns -ENOTSUP if comp device does not support STATEFUL operations.
1106  *  - Returns -ENOTSUP if comp device does not support the comp transform.
1107  *  - Returns -ENOMEM if the private stream could not be allocated.
1108  */
1109 int
1110 qat_comp_stream_create(struct rte_compressdev *dev,
1111 		       const struct rte_comp_xform *xform,
1112 		       void **stream)
1113 {
1114 	struct qat_comp_dev_private *qat = dev->data->dev_private;
1115 	struct qat_comp_stream *ptr;
1116 
1117 	if (unlikely(stream == NULL)) {
1118 		QAT_LOG(ERR, "QAT: stream parameter is NULL");
1119 		return -EINVAL;
1120 	}
1121 	if (unlikely(xform->type == RTE_COMP_COMPRESS)) {
1122 		QAT_LOG(ERR, "QAT: stateful compression not supported");
1123 		return -ENOTSUP;
1124 	}
1125 	if (unlikely(qat->streampool == NULL)) {
1126 		QAT_LOG(ERR, "QAT device has no stream mempool");
1127 		return -ENOMEM;
1128 	}
1129 	if (rte_mempool_get(qat->streampool, stream)) {
1130 		QAT_LOG(ERR, "Couldn't get object from qat stream mempool");
1131 		return -ENOMEM;
1132 	}
1133 
1134 	ptr = (struct qat_comp_stream *) *stream;
1135 	qat_comp_stream_reset(ptr);
1136 	ptr->qat_xform.qat_comp_request_type = QAT_COMP_REQUEST_DECOMPRESS;
1137 	ptr->qat_xform.checksum_type = xform->decompress.chksum;
1138 
1139 	if (qat_comp_create_templates(&ptr->qat_xform, qat->interm_buff_mz,
1140 				      xform, ptr, RTE_COMP_OP_STATEFUL)) {
1141 		QAT_LOG(ERR, "QAT: problem with creating descriptor template for stream");
1142 		rte_mempool_put(qat->streampool, *stream);
1143 		*stream = NULL;
1144 		return -EINVAL;
1145 	}
1146 
1147 	return 0;
1148 }
1149 
1150 /**
1151  * Free driver private stream data.
1152  *
1153  * @param dev
1154  *   Compressdev device
1155  * @param stream
1156  *   handle of pmd's private stream data
1157  * @return
1158  *  - 0 if successful
1159  *  - <0 in error cases
1160  *  - Returns -EINVAL if input parameters are invalid.
1161  *  - Returns -ENOTSUP if comp device does not support STATEFUL operations.
1162  *  - Returns -EBUSY if the stream can't be freed due to inflight operations
1163  */
1164 int
1165 qat_comp_stream_free(struct rte_compressdev *dev, void *stream)
1166 {
1167 	if (stream) {
1168 		struct qat_comp_dev_private *qat = dev->data->dev_private;
1169 		qat_comp_stream_reset((struct qat_comp_stream *) stream);
1170 		rte_mempool_put(qat->streampool, stream);
1171 		return 0;
1172 	}
1173 	return -EINVAL;
1174 }
1175