/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_hexdump.h>
#include <rte_comp.h>
#include <rte_bus_pci.h>
#include <rte_byteorder.h>
#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_spinlock.h>
#include <rte_log.h>
#include <rte_malloc.h>

#include "qat_logs.h"
#include "qat_comp.h"
#include "qat_comp_pmd.h"

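/**
 * Build a QAT firmware compression request from a compress operation.
 *
 * Copies the pre-built request template from the op's private_xform into
 * the ring message, then fills in the per-operation fields: the opaque
 * back-pointer to the op, the input/output lengths and the source and
 * destination data addresses, using the SGL tables from the op cookie for
 * chained mbufs or physical addresses for flat buffers.
 *
 * @param in_op
 *   rte_comp_op to build the request from; must be RTE_COMP_OP_STATELESS
 * @param out_msg
 *   transmit ring slot the firmware request is written to
 * @param op_cookie
 *   per-descriptor cookie holding the pre-allocated SGL tables
 * @param qat_dev_gen
 *   QAT device generation (unused)
 * @return
 *  - 0 if successful
 *  - <0 in error cases
 */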
int
qat_comp_build_request(void *in_op, uint8_t *out_msg,
		       void *op_cookie,
		       enum qat_device_gen qat_dev_gen __rte_unused)
{
	struct rte_comp_op *op = in_op;
	struct qat_comp_op_cookie *cookie =
			(struct qat_comp_op_cookie *)op_cookie;
	struct qat_comp_xform *qat_xform = op->private_xform;
	const uint8_t *tmpl = (uint8_t *)&qat_xform->qat_comp_req_tmpl;
	struct icp_qat_fw_comp_req *comp_req =
	    (struct icp_qat_fw_comp_req *)out_msg;

	if (unlikely(op->op_type != RTE_COMP_OP_STATELESS)) {
		QAT_DP_LOG(ERR, "QAT PMD only supports stateless compression "
				"operation requests, op (%p) is not a "
				"stateless operation.", op);
		return -EINVAL;
	}

	rte_mov128(out_msg, tmpl);
	comp_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;

	/* common for sgl and flat buffers */
	comp_req->comp_pars.comp_len = op->src.length;
	comp_req->comp_pars.out_buffer_sz = rte_pktmbuf_pkt_len(op->m_dst) -
			op->dst.offset;

	if (op->m_src->next != NULL || op->m_dst->next != NULL) {
		/* sgl */
		int ret = 0;

		ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
				QAT_COMN_PTR_TYPE_SGL);

		ret = qat_sgl_fill_array(op->m_src,
				op->src.offset,
				&cookie->qat_sgl_src,
				op->src.length,
				RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS);
		if (ret) {
			QAT_DP_LOG(ERR, "QAT PMD Cannot fill source sgl array");
			return ret;
		}

		ret = qat_sgl_fill_array(op->m_dst,
				op->dst.offset,
				&cookie->qat_sgl_dst,
				comp_req->comp_pars.out_buffer_sz,
				RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS);
		if (ret) {
			QAT_DP_LOG(ERR, "QAT PMD Cannot fill dest. sgl array");
			return ret;
		}

		comp_req->comn_mid.src_data_addr =
				cookie->qat_sgl_src_phys_addr;
		comp_req->comn_mid.dest_data_addr =
				cookie->qat_sgl_dst_phys_addr;
		comp_req->comn_mid.src_length = 0;
		comp_req->comn_mid.dst_length = 0;

	} else {
		/* flat aka linear buffer */
		ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
				QAT_COMN_PTR_TYPE_FLAT);
		comp_req->comn_mid.src_length = op->src.length;
		comp_req->comn_mid.dst_length =
				comp_req->comp_pars.out_buffer_sz;

		comp_req->comn_mid.src_data_addr =
		    rte_pktmbuf_mtophys_offset(op->m_src, op->src.offset);
		comp_req->comn_mid.dest_data_addr =
		    rte_pktmbuf_mtophys_offset(op->m_dst, op->dst.offset);
	}

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	QAT_DP_LOG(DEBUG, "Direction: %s",
	    qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
			    "decompression" : "compression");
	QAT_DP_HEXDUMP_LOG(DEBUG, "qat compression message:", comp_req,
		    sizeof(struct icp_qat_fw_comp_req));
#endif
	return 0;
}

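/**
 * Translate a QAT firmware response back into the originating operation.
 *
 * Recovers the rte_comp_op from the response's opaque data, checks the
 * firmware status and fills in the op's status, debug_status, consumed
 * and produced counters and, if requested, the output checksum. The
 * completed op is handed back through *op.
 *
 * @param op
 *   location to store the pointer to the completed rte_comp_op
 * @param resp
 *   firmware response message
 * @return
 *  - 0 always; failures are reported through the op's status fields
 */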
int
qat_comp_process_response(void **op, uint8_t *resp)
{
	struct icp_qat_fw_comp_resp *resp_msg =
			(struct icp_qat_fw_comp_resp *)resp;
	struct rte_comp_op *rx_op = (struct rte_comp_op *)(uintptr_t)
			(resp_msg->opaque_data);
	struct qat_comp_xform *qat_xform = (struct qat_comp_xform *)
				(rx_op->private_xform);

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	QAT_DP_LOG(DEBUG, "Direction: %s",
	    qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
	    "decompression" : "compression");
	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
			sizeof(struct icp_qat_fw_comp_resp));
#endif

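	/*
	 * Compression request templates are built with the CNV
	 * (compress-and-verify) flag set, so a response header without
	 * the CNV flag indicates that unsuitable firmware is loaded.
	 */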
	if (likely(qat_xform->qat_comp_request_type
			!= QAT_COMP_REQUEST_DECOMPRESS)) {
		if (unlikely(ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(
				resp_msg->comn_resp.hdr_flags)
					== ICP_QAT_FW_COMP_NO_CNV)) {
			rx_op->status = RTE_COMP_OP_STATUS_ERROR;
			rx_op->debug_status = ERR_CODE_QAT_COMP_WRONG_FW;
			*op = (void *)rx_op;
			QAT_DP_LOG(ERR, "QAT has wrong firmware");
			return 0;
		}
	}

	if ((ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(resp_msg->comn_resp.comn_status)
		| ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(
				resp_msg->comn_resp.comn_status)) !=
				ICP_QAT_FW_COMN_STATUS_FLAG_OK) {

		rx_op->status = RTE_COMP_OP_STATUS_ERROR;
		rx_op->debug_status =
			*((uint16_t *)(&resp_msg->comn_resp.comn_error));
	} else {
		struct icp_qat_fw_resp_comp_pars *comp_resp =
		  (struct icp_qat_fw_resp_comp_pars *)&resp_msg->comp_resp_pars;

		rx_op->status = RTE_COMP_OP_STATUS_SUCCESS;
		rx_op->consumed = comp_resp->input_byte_counter;
		rx_op->produced = comp_resp->output_byte_counter;

		if (qat_xform->checksum_type != RTE_COMP_CHECKSUM_NONE) {
			if (qat_xform->checksum_type == RTE_COMP_CHECKSUM_CRC32)
				rx_op->output_chksum = comp_resp->curr_crc32;
			else if (qat_xform->checksum_type ==
					RTE_COMP_CHECKSUM_ADLER32)
				rx_op->output_chksum = comp_resp->curr_adler_32;
			else
				rx_op->output_chksum = comp_resp->curr_chksum;
		}
	}
	*op = (void *)rx_op;

	return 0;
}

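/* Size of one driver private_xform object, rounded up to a multiple of
 * 8 bytes; also used to clear the object in qat_comp_private_xform_free().
 */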
unsigned int
qat_comp_xform_size(void)
{
	return RTE_ALIGN_CEIL(sizeof(struct qat_comp_xform), 8);
}

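/* Fill the fields of the common request header that depend only on the
 * request type: the service command id, the service type, the header
 * flags and the default (flat-pointer) common request flags.
 */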
static void qat_comp_create_req_hdr(struct icp_qat_fw_comn_req_hdr *header,
				    enum qat_comp_request_type request)
{
	if (request == QAT_COMP_REQUEST_FIXED_COMP_STATELESS)
		header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
	else if (request == QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)
		header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC;
	else if (request == QAT_COMP_REQUEST_DECOMPRESS)
		header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;

	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
	header->hdr_flags =
	    ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);

	header->comn_req_flags = ICP_QAT_FW_COMN_FLAGS_BUILD(
	    QAT_COMN_CD_FLD_TYPE_16BYTE_DATA, QAT_COMN_PTR_TYPE_FLAT);
}

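/* Pre-build the firmware request template kept in the private_xform:
 * select direction and algorithm, translate the compression level to a
 * HW search depth, build the request-parameter flags (CNV is enabled
 * for compression only) and set up the slice chain. Dynamic Huffman
 * encoding is rejected here as it is not supported by this PMD yet.
 */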
static int qat_comp_create_templates(struct qat_comp_xform *qat_xform,
			const struct rte_memzone *interm_buff_mz __rte_unused,
			const struct rte_comp_xform *xform)
{
	struct icp_qat_fw_comp_req *comp_req;
	int comp_level, algo;
	uint32_t req_par_flags;
	int direction = ICP_QAT_HW_COMPRESSION_DIR_COMPRESS;

	if (unlikely(qat_xform == NULL)) {
		QAT_LOG(ERR, "Session was not created for this device");
		return -EINVAL;
	}

	if (qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) {
		direction = ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS;
		comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_1;
		req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
				ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP,
				ICP_QAT_FW_COMP_BFINAL, ICP_QAT_FW_COMP_NO_CNV,
				ICP_QAT_FW_COMP_NO_CNV_RECOVERY);

	} else {
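		/*
		 * Translate the public compression level (1..9 or the PMD
		 * default) to a QAT HW compression depth.
		 */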
		if (xform->compress.level == RTE_COMP_LEVEL_PMD_DEFAULT)
			comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_8;
		else if (xform->compress.level == 1)
			comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_1;
		else if (xform->compress.level == 2)
			comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_4;
		else if (xform->compress.level == 3)
			comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_8;
		else if (xform->compress.level >= 4 &&
			 xform->compress.level <= 9)
			comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_16;
		else {
			QAT_LOG(ERR, "compression level not supported");
			return -EINVAL;
		}
		req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
				ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP,
				ICP_QAT_FW_COMP_BFINAL, ICP_QAT_FW_COMP_CNV,
				ICP_QAT_FW_COMP_CNV_RECOVERY);
	}

	switch (xform->compress.algo) {
	case RTE_COMP_ALGO_DEFLATE:
		algo = ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE;
		break;
	case RTE_COMP_ALGO_LZS:
	default:
		/* includes RTE_COMP_ALGO_NULL */
		QAT_LOG(ERR, "compression algorithm not supported");
		return -EINVAL;
	}

	comp_req = &qat_xform->qat_comp_req_tmpl;

	/* Initialize header */
	qat_comp_create_req_hdr(&comp_req->comn_hdr,
					qat_xform->qat_comp_request_type);

	comp_req->comn_hdr.serv_specif_flags = ICP_QAT_FW_COMP_FLAGS_BUILD(
	    ICP_QAT_FW_COMP_STATELESS_SESSION,
	    ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
	    ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
	    ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
	    ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_USED_AS_INTMD_BUF);

	comp_req->cd_pars.sl.comp_slice_cfg_word[0] =
	    ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(
		direction,
		/* In CPM 1.6 this is the only valid mode */
		ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED, algo,
		/* Translate level to depth */
		comp_level, ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);

	comp_req->comp_pars.initial_adler = 1;
	comp_req->comp_pars.initial_crc32 = 0;
	comp_req->comp_pars.req_par_flags = req_par_flags;

	if (qat_xform->qat_comp_request_type ==
			QAT_COMP_REQUEST_FIXED_COMP_STATELESS ||
	    qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) {
		ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl,
					    ICP_QAT_FW_SLICE_DRAM_WR);
		ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->comp_cd_ctrl,
					    ICP_QAT_FW_SLICE_COMP);
	} else if (qat_xform->qat_comp_request_type ==
		   QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS) {

		QAT_LOG(ERR, "Dynamic huffman encoding not supported");
		return -EINVAL;
	}

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	QAT_DP_HEXDUMP_LOG(DEBUG, "qat compression message template:", comp_req,
		    sizeof(struct icp_qat_fw_comp_req));
#endif
	return 0;
}

/**
 * Create driver private_xform data.
 *
 * @param dev
 *   Compressdev device
 * @param xform
 *   xform data from application
 * @param private_xform
 *   ptr where handle of pmd's private_xform data should be stored
 * @return
 *  - 0 if successful, with a valid private_xform handle stored
 *  - -EINVAL if input parameters are invalid
 *  - -ENOTSUP if comp device does not support the comp transform
 *  - -ENOMEM if the private_xform could not be allocated
 */
int
qat_comp_private_xform_create(struct rte_compressdev *dev,
			      const struct rte_comp_xform *xform,
			      void **private_xform)
{
	struct qat_comp_dev_private *qat = dev->data->dev_private;

	if (unlikely(private_xform == NULL)) {
		QAT_LOG(ERR, "QAT: private_xform parameter is NULL");
		return -EINVAL;
	}
	if (unlikely(qat->xformpool == NULL)) {
		QAT_LOG(ERR, "QAT device has no private_xform mempool");
		return -ENOMEM;
	}
	if (rte_mempool_get(qat->xformpool, private_xform)) {
		QAT_LOG(ERR, "Couldn't get object from qat xform mempool");
		return -ENOMEM;
	}

	struct qat_comp_xform *qat_xform =
			(struct qat_comp_xform *)*private_xform;

	if (xform->type == RTE_COMP_COMPRESS) {
		if (xform->compress.deflate.huffman ==
				RTE_COMP_HUFFMAN_DYNAMIC) {
			QAT_LOG(ERR,
			"QAT device doesn't support dynamic compression");
			/* return the object taken above to the pool */
			rte_mempool_put(qat->xformpool, *private_xform);
			*private_xform = NULL;
			return -ENOTSUP;
		}

		if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED ||
		  ((xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_DEFAULT)
				   && qat->interm_buff_mz == NULL))
			qat_xform->qat_comp_request_type =
					QAT_COMP_REQUEST_FIXED_COMP_STATELESS;

		qat_xform->checksum_type = xform->compress.chksum;

	} else {
		qat_xform->qat_comp_request_type = QAT_COMP_REQUEST_DECOMPRESS;
		qat_xform->checksum_type = xform->decompress.chksum;
	}

	if (qat_comp_create_templates(qat_xform, qat->interm_buff_mz, xform)) {
		QAT_LOG(ERR, "QAT: Problem with setting compression");
		/* don't leak the mempool object on failure */
		rte_mempool_put(qat->xformpool, *private_xform);
		*private_xform = NULL;
		return -EINVAL;
	}
	return 0;
}

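/*
 * Illustrative usage sketch (not part of the driver): an application
 * reaches the callback above through the public compressdev API rather
 * than calling it directly. A minimal sketch, assuming "dev_id" names an
 * already-configured QAT compressdev:
 *
 *	struct rte_comp_xform xform = {
 *		.type = RTE_COMP_COMPRESS,
 *		.compress = {
 *			.algo = RTE_COMP_ALGO_DEFLATE,
 *			.deflate.huffman = RTE_COMP_HUFFMAN_FIXED,
 *			.level = RTE_COMP_LEVEL_PMD_DEFAULT,
 *			.chksum = RTE_COMP_CHECKSUM_NONE,
 *		},
 *	};
 *	void *priv_xform = NULL;
 *
 *	if (rte_compressdev_private_xform_create(dev_id, &xform,
 *			&priv_xform) < 0)
 *		rte_exit(EXIT_FAILURE, "xform create failed\n");
 */
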
/**
 * Free driver private_xform data.
 *
 * @param dev
 *   Compressdev device
 * @param private_xform
 *   handle of pmd's private_xform data
 * @return
 *  - 0 if successful
 *  - -EINVAL if input parameters are invalid
 */
int
qat_comp_private_xform_free(struct rte_compressdev *dev __rte_unused,
			    void *private_xform)
{
	struct qat_comp_xform *qat_xform =
			(struct qat_comp_xform *)private_xform;

	if (qat_xform) {
		memset(qat_xform, 0, qat_comp_xform_size());
		struct rte_mempool *mp = rte_mempool_from_obj(qat_xform);

		rte_mempool_put(mp, qat_xform);
		return 0;
	}
	return -EINVAL;
}