/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Intel Corporation
 */

#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_hexdump.h>
#include <rte_comp.h>
#include <rte_bus_pci.h>
#include <rte_byteorder.h>
#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_spinlock.h>
#include <rte_log.h>
#include <rte_malloc.h>

#include "qat_logs.h"
#include "qat_comp.h"
#include "qat_comp_pmd.h"

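/* Build a firmware descriptor in out_msg for one stateless (de)compression
 * operation; op_cookie carries the per-op scatter-gather state. Used when
 * enqueuing ops, so failures are reported through op->status as well as the
 * return value.
 */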
int
qat_comp_build_request(void *in_op, uint8_t *out_msg,
		       void *op_cookie,
		       enum qat_device_gen qat_dev_gen __rte_unused)
{
	struct rte_comp_op *op = in_op;
	struct qat_comp_op_cookie *cookie =
			(struct qat_comp_op_cookie *)op_cookie;
	struct qat_comp_xform *qat_xform = op->private_xform;
	const uint8_t *tmpl = (uint8_t *)&qat_xform->qat_comp_req_tmpl;
	struct icp_qat_fw_comp_req *comp_req =
	    (struct icp_qat_fw_comp_req *)out_msg;

	if (unlikely(op->op_type != RTE_COMP_OP_STATELESS)) {
		QAT_DP_LOG(ERR, "QAT PMD only supports stateless compression "
				"operation requests; op (%p) is not a "
				"stateless operation.", op);
		op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

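	/* Start from the request template pre-built at session setup and
	 * stash the op pointer in the opaque field so the response handler
	 * can recover it on dequeue.
	 */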
	rte_mov128(out_msg, tmpl);
	comp_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;

	if (likely(qat_xform->qat_comp_request_type ==
		    QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)) {
		if (unlikely(op->src.length > QAT_FALLBACK_THLD)) {

			/* fall back to fixed compression */
			comp_req->comn_hdr.service_cmd_id =
					ICP_QAT_FW_COMP_CMD_STATIC;

			ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);

			ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->u2.xlt_cd_ctrl,
					ICP_QAT_FW_SLICE_NULL);
			ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->u2.xlt_cd_ctrl,
					ICP_QAT_FW_SLICE_NULL);

			QAT_DP_LOG(DEBUG, "QAT PMD: falling back to fixed "
				   "compression; IM buffer size may be too "
				   "small for the produced data. Use an input "
				   "buffer shorter than %d bytes",
				   QAT_FALLBACK_THLD);
		}
	}

	/* common for SGL and flat buffers */
	comp_req->comp_pars.comp_len = op->src.length;
	comp_req->comp_pars.out_buffer_sz = rte_pktmbuf_pkt_len(op->m_dst) -
			op->dst.offset;

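	/* Chained mbufs are described to the firmware through the
	 * scatter-gather lists (SGLs) kept in the op cookie; single-segment
	 * mbufs are passed as flat buffers by physical address and length.
	 */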
	if (op->m_src->next != NULL || op->m_dst->next != NULL) {
		/* SGL */
		int ret = 0;

		ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
				QAT_COMN_PTR_TYPE_SGL);

		if (unlikely(op->m_src->nb_segs > cookie->src_nb_elems)) {
			/* need to allocate more elements in the SGL */
			void *tmp;

			tmp = rte_realloc_socket(cookie->qat_sgl_src_d,
					  sizeof(struct qat_sgl) +
					  sizeof(struct qat_flat_buf) *
					  op->m_src->nb_segs, 64,
					  cookie->socket_id);

			if (unlikely(tmp == NULL)) {
				QAT_DP_LOG(ERR, "QAT PMD can't allocate memory"
					   " for %d elements of SGL",
					   op->m_src->nb_segs);
				op->status = RTE_COMP_OP_STATUS_ERROR;
				return -ENOMEM;
			}
			/* new SGL is valid now */
			cookie->qat_sgl_src_d = (struct qat_sgl *)tmp;
			cookie->src_nb_elems = op->m_src->nb_segs;
			cookie->qat_sgl_src_phys_addr =
				rte_malloc_virt2iova(cookie->qat_sgl_src_d);
		}

		ret = qat_sgl_fill_array(op->m_src,
				op->src.offset,
				cookie->qat_sgl_src_d,
				op->src.length,
				cookie->src_nb_elems);
		if (ret) {
			QAT_DP_LOG(ERR, "QAT PMD cannot fill source SGL array");
			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
			return ret;
		}

		if (unlikely(op->m_dst->nb_segs > cookie->dst_nb_elems)) {
			/* need to allocate more elements in the SGL */
			struct qat_sgl *tmp;

			tmp = rte_realloc_socket(cookie->qat_sgl_dst_d,
					  sizeof(struct qat_sgl) +
					  sizeof(struct qat_flat_buf) *
					  op->m_dst->nb_segs, 64,
					  cookie->socket_id);

			if (unlikely(tmp == NULL)) {
				QAT_DP_LOG(ERR, "QAT PMD can't allocate memory"
					   " for %d elements of SGL",
					   op->m_dst->nb_segs);
				op->status = RTE_COMP_OP_STATUS_ERROR;
				return -ENOMEM;
			}
			/* new SGL is valid now */
			cookie->qat_sgl_dst_d = tmp;
			cookie->dst_nb_elems = op->m_dst->nb_segs;
			cookie->qat_sgl_dst_phys_addr =
				rte_malloc_virt2iova(cookie->qat_sgl_dst_d);
		}

		ret = qat_sgl_fill_array(op->m_dst,
				op->dst.offset,
				cookie->qat_sgl_dst_d,
				comp_req->comp_pars.out_buffer_sz,
				cookie->dst_nb_elems);
		if (ret) {
			QAT_DP_LOG(ERR, "QAT PMD cannot fill dest. SGL array");
			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
			return ret;
		}

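		/* With SGL addressing the descriptor carries the IOVAs of
		 * the SGL tables and the flat length fields stay zero.
		 */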
		comp_req->comn_mid.src_data_addr =
				cookie->qat_sgl_src_phys_addr;
		comp_req->comn_mid.dest_data_addr =
				cookie->qat_sgl_dst_phys_addr;
		comp_req->comn_mid.src_length = 0;
		comp_req->comn_mid.dst_length = 0;

	} else {
		/* flat aka linear buffer */
		ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
				QAT_COMN_PTR_TYPE_FLAT);
		comp_req->comn_mid.src_length = op->src.length;
		comp_req->comn_mid.dst_length =
				comp_req->comp_pars.out_buffer_sz;

		comp_req->comn_mid.src_data_addr =
		    rte_pktmbuf_mtophys_offset(op->m_src, op->src.offset);
		comp_req->comn_mid.dest_data_addr =
		    rte_pktmbuf_mtophys_offset(op->m_dst, op->dst.offset);
	}

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	QAT_DP_LOG(DEBUG, "Direction: %s",
	    qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
			    "decompression" : "compression");
	QAT_DP_HEXDUMP_LOG(DEBUG, "qat compression message:", comp_req,
		    sizeof(struct icp_qat_fw_comp_req));
#endif
	return 0;
}

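/* Parse one firmware response: translate the slice status and error codes
 * into a compressdev op status, and on success fill in the consumed and
 * produced byte counts and the checksum.
 */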
int
qat_comp_process_response(void **op, uint8_t *resp, uint64_t *dequeue_err_count)
{
	struct icp_qat_fw_comp_resp *resp_msg =
			(struct icp_qat_fw_comp_resp *)resp;
	struct rte_comp_op *rx_op = (struct rte_comp_op *)(uintptr_t)
			(resp_msg->opaque_data);
	struct qat_comp_xform *qat_xform = (struct qat_comp_xform *)
				(rx_op->private_xform);
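	/* non-zero if the compression slice or the translator (xlat) slice
	 * flagged an error for this request
	 */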
	int err = resp_msg->comn_resp.comn_status &
			((1 << QAT_COMN_RESP_CMP_STATUS_BITPOS) |
			 (1 << QAT_COMN_RESP_XLAT_STATUS_BITPOS));

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	QAT_DP_LOG(DEBUG, "Direction: %s",
	    qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
	    "decompression" : "compression");
	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
			sizeof(struct icp_qat_fw_comp_resp));
#endif

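	/* Compression requests are issued with compress-and-verify (CNV);
	 * a response without the CNV flag set means the firmware could not
	 * verify its output, which is treated as a wrong-firmware error.
	 */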
	if (likely(qat_xform->qat_comp_request_type
			!= QAT_COMP_REQUEST_DECOMPRESS)) {
		if (unlikely(ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(
				resp_msg->comn_resp.hdr_flags)
					== ICP_QAT_FW_COMP_NO_CNV)) {
			rx_op->status = RTE_COMP_OP_STATUS_ERROR;
			rx_op->debug_status = ERR_CODE_QAT_COMP_WRONG_FW;
			*op = (void *)rx_op;
			QAT_DP_LOG(ERR, "QAT has wrong firmware");
			++(*dequeue_err_count);
			return 0;
		}
	}

	if (err) {
		if (unlikely((err & (1 << QAT_COMN_RESP_XLAT_STATUS_BITPOS))
			     && (qat_xform->qat_comp_request_type
				 == QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS))) {
			QAT_DP_LOG(ERR, "QAT intermediate buffer may be too "
			    "small for output, try configuring a larger size");
		}

		int8_t cmp_err_code =
			(int8_t)resp_msg->comn_resp.comn_error.cmp_err_code;
		int8_t xlat_err_code =
			(int8_t)resp_msg->comn_resp.comn_error.xlat_err_code;

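		/* An overflow in either slice, with the other slice clean or
		 * also overflowed, means the destination buffer was too
		 * small; report it as an out-of-space condition.
		 */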
		if ((cmp_err_code == ERR_CODE_OVERFLOW_ERROR &&
		     !xlat_err_code) ||
		    (!cmp_err_code &&
		     xlat_err_code == ERR_CODE_OVERFLOW_ERROR) ||
		    (cmp_err_code == ERR_CODE_OVERFLOW_ERROR &&
		     xlat_err_code == ERR_CODE_OVERFLOW_ERROR))
			rx_op->status =
				RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
		else
			rx_op->status = RTE_COMP_OP_STATUS_ERROR;

		++(*dequeue_err_count);
		rx_op->debug_status =
			*((uint16_t *)(&resp_msg->comn_resp.comn_error));
	} else {
		struct icp_qat_fw_resp_comp_pars *comp_resp =
		  (struct icp_qat_fw_resp_comp_pars *)&resp_msg->comp_resp_pars;

		rx_op->status = RTE_COMP_OP_STATUS_SUCCESS;
		rx_op->consumed = comp_resp->input_byte_counter;
		rx_op->produced = comp_resp->output_byte_counter;

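		/* Pick the checksum field matching the session setup; the
		 * remaining case (CRC32_ADLER32) uses the combined value.
		 */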
		if (qat_xform->checksum_type != RTE_COMP_CHECKSUM_NONE) {
			if (qat_xform->checksum_type == RTE_COMP_CHECKSUM_CRC32)
				rx_op->output_chksum = comp_resp->curr_crc32;
			else if (qat_xform->checksum_type ==
					RTE_COMP_CHECKSUM_ADLER32)
				rx_op->output_chksum = comp_resp->curr_adler_32;
			else
				rx_op->output_chksum = comp_resp->curr_chksum;
		}
	}
	*op = (void *)rx_op;

	return 0;
}

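/* Size of the PMD's private_xform object, rounded up to a multiple of
 * 8 bytes (assumed here to keep mempool elements naturally aligned).
 */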
unsigned int
qat_comp_xform_size(void)
{
	return RTE_ALIGN_CEIL(sizeof(struct qat_comp_xform), 8);
}

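/* Fill the common request header: service command, service type and the
 * default flat-pointer flags (overridden per-op when SGLs are used).
 */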
static void qat_comp_create_req_hdr(struct icp_qat_fw_comn_req_hdr *header,
				    enum qat_comp_request_type request)
{
	if (request == QAT_COMP_REQUEST_FIXED_COMP_STATELESS)
		header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
	else if (request == QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)
		header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC;
	else if (request == QAT_COMP_REQUEST_DECOMPRESS)
		header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;

	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
	header->hdr_flags =
	    ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);

	header->comn_req_flags = ICP_QAT_FW_COMN_FLAGS_BUILD(
	    QAT_COMN_CD_FLD_TYPE_16BYTE_DATA, QAT_COMN_PTR_TYPE_FLAT);
}

static int qat_comp_create_templates(struct qat_comp_xform *qat_xform,
			const struct rte_memzone *interm_buff_mz,
			const struct rte_comp_xform *xform)
{
	struct icp_qat_fw_comp_req *comp_req;
	int comp_level, algo;
	uint32_t req_par_flags;
	int direction = ICP_QAT_HW_COMPRESSION_DIR_COMPRESS;

	if (unlikely(qat_xform == NULL)) {
		QAT_LOG(ERR, "Session was not created for this device");
		return -EINVAL;
	}

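	/* Map the compressdev level onto a QAT search depth (1, 4, 8 or 16);
	 * decompression always uses depth 1 and disables CNV.
	 */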
	if (qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) {
		direction = ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS;
		comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_1;
		req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
				ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP,
				ICP_QAT_FW_COMP_BFINAL, ICP_QAT_FW_COMP_NO_CNV,
				ICP_QAT_FW_COMP_NO_CNV_RECOVERY);

	} else {
		if (xform->compress.level == RTE_COMP_LEVEL_PMD_DEFAULT)
			comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_8;
		else if (xform->compress.level == 1)
			comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_1;
		else if (xform->compress.level == 2)
			comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_4;
		else if (xform->compress.level == 3)
			comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_8;
		else if (xform->compress.level >= 4 &&
			 xform->compress.level <= 9)
			comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_16;
		else {
			QAT_LOG(ERR, "compression level not supported");
			return -EINVAL;
		}
		req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
				ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP,
				ICP_QAT_FW_COMP_BFINAL, ICP_QAT_FW_COMP_CNV,
				ICP_QAT_FW_COMP_CNV_RECOVERY);
	}

	switch (xform->compress.algo) {
	case RTE_COMP_ALGO_DEFLATE:
		algo = ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE;
		break;
	case RTE_COMP_ALGO_LZS:
	default:
		/* RTE_COMP_ALGO_NULL */
		QAT_LOG(ERR, "compression algorithm not supported");
		return -EINVAL;
	}

	comp_req = &qat_xform->qat_comp_req_tmpl;

	/* Initialize header */
	qat_comp_create_req_hdr(&comp_req->comn_hdr,
					qat_xform->qat_comp_request_type);

	comp_req->comn_hdr.serv_specif_flags = ICP_QAT_FW_COMP_FLAGS_BUILD(
	    ICP_QAT_FW_COMP_STATELESS_SESSION,
	    ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
	    ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
	    ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
	    ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);

	comp_req->cd_pars.sl.comp_slice_cfg_word[0] =
	    ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(
		direction,
		/* the only valid mode on CPM 1.6 */
		ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED, algo,
		/* translate level to depth */
		comp_level, ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);

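	/* standard checksum seeds: Adler-32 starts at 1, CRC-32 at 0 */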
	comp_req->comp_pars.initial_adler = 1;
	comp_req->comp_pars.initial_crc32 = 0;
	comp_req->comp_pars.req_par_flags = req_par_flags;

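	/* Chain the firmware slices: static compression and decompression
	 * write straight to DRAM, while dynamic compression runs through
	 * the translator (xlat) slice and its intermediate buffers first.
	 */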
	if (qat_xform->qat_comp_request_type ==
			QAT_COMP_REQUEST_FIXED_COMP_STATELESS ||
	    qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) {
		ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl,
					    ICP_QAT_FW_SLICE_DRAM_WR);
		ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->comp_cd_ctrl,
					    ICP_QAT_FW_SLICE_COMP);
	} else if (qat_xform->qat_comp_request_type ==
			QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS) {

		ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl,
				ICP_QAT_FW_SLICE_XLAT);
		ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->comp_cd_ctrl,
				ICP_QAT_FW_SLICE_COMP);

		ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->u2.xlt_cd_ctrl,
				ICP_QAT_FW_SLICE_DRAM_WR);
		ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->u2.xlt_cd_ctrl,
				ICP_QAT_FW_SLICE_XLAT);

		comp_req->u1.xlt_pars.inter_buff_ptr =
				interm_buff_mz->phys_addr;
	}

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	QAT_DP_HEXDUMP_LOG(DEBUG, "qat compression message template:", comp_req,
		    sizeof(struct icp_qat_fw_comp_req));
#endif
	return 0;
}

/**
 * Create driver private_xform data.
 *
 * @param dev
 *   Compressdev device
 * @param xform
 *   xform data from application
 * @param private_xform
 *   Pointer where the handle of the PMD's private_xform data will be stored
 * @return
 *  - 0 if successful, with a valid private_xform handle stored
 *  - Returns -EINVAL if input parameters are invalid.
 *  - Returns -ENOTSUP if comp device does not support the comp transform.
 *  - Returns -ENOMEM if the private_xform could not be allocated.
 */
int
qat_comp_private_xform_create(struct rte_compressdev *dev,
			      const struct rte_comp_xform *xform,
			      void **private_xform)
{
	struct qat_comp_dev_private *qat = dev->data->dev_private;

	if (unlikely(private_xform == NULL)) {
		QAT_LOG(ERR, "QAT: private_xform parameter is NULL");
		return -EINVAL;
	}
	if (unlikely(qat->xformpool == NULL)) {
		QAT_LOG(ERR, "QAT device has no private_xform mempool");
		return -ENOMEM;
	}
	if (rte_mempool_get(qat->xformpool, private_xform)) {
		QAT_LOG(ERR, "Couldn't get object from qat xform mempool");
		return -ENOMEM;
	}

	struct qat_comp_xform *qat_xform =
			(struct qat_comp_xform *)*private_xform;

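	/* Fixed Huffman needs no extra resources; dynamic Huffman requires
	 * the intermediate buffer memzone, so HUFFMAN_DEFAULT falls back to
	 * fixed when no IM buffers were configured.
	 */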
	if (xform->type == RTE_COMP_COMPRESS) {

		if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED ||
		  ((xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_DEFAULT)
				   && qat->interm_buff_mz == NULL))
			qat_xform->qat_comp_request_type =
					QAT_COMP_REQUEST_FIXED_COMP_STATELESS;

		else if ((xform->compress.deflate.huffman ==
				RTE_COMP_HUFFMAN_DYNAMIC ||
				xform->compress.deflate.huffman ==
						RTE_COMP_HUFFMAN_DEFAULT) &&
				qat->interm_buff_mz != NULL)

			qat_xform->qat_comp_request_type =
					QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS;

		else {
			QAT_LOG(ERR,
				"IM buffers needed for dynamic deflate. Set size in config file");
			/* return the unused object to the mempool */
			rte_mempool_put(qat->xformpool, qat_xform);
			return -EINVAL;
		}

		qat_xform->checksum_type = xform->compress.chksum;

	} else {
		qat_xform->qat_comp_request_type = QAT_COMP_REQUEST_DECOMPRESS;
		qat_xform->checksum_type = xform->decompress.chksum;
	}

	if (qat_comp_create_templates(qat_xform, qat->interm_buff_mz, xform)) {
		QAT_LOG(ERR, "QAT: failed to create request templates");
		/* return the unused object to the mempool */
		rte_mempool_put(qat->xformpool, qat_xform);
		return -EINVAL;
	}
	return 0;
}

/**
 * Free driver private_xform data.
 *
 * @param dev
 *   Compressdev device
 * @param private_xform
 *   handle of pmd's private_xform data
 * @return
 *  - 0 if successful
 *  - <0 in error cases
 *  - Returns -EINVAL if input parameters are invalid.
 */
int
qat_comp_private_xform_free(struct rte_compressdev *dev __rte_unused,
			    void *private_xform)
{
	struct qat_comp_xform *qat_xform =
			(struct qat_comp_xform *)private_xform;

	if (qat_xform) {
		memset(qat_xform, 0, qat_comp_xform_size());
		struct rte_mempool *mp = rte_mempool_from_obj(qat_xform);

		rte_mempool_put(mp, qat_xform);
		return 0;
	}
	return -EINVAL;
}