/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2022 Intel Corporation
 */

#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include "qat_sym_session.h"
#include "qat_sym.h"
#include "qat_asym.h"
#include "qat_crypto.h"
#include "qat_crypto_pmd_gens.h"

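/*
 * Symmetric crypto capabilities advertised for GEN3 QAT devices.
 * Each entry gives the supported key, digest, AAD and IV size ranges
 * for one algorithm; the table is terminated by
 * RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST().
 */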
static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen3[] = {
	QAT_SYM_PLAIN_AUTH_CAP(SHA1,
		CAP_SET(block_size, 64),
		CAP_RNG(digest_size, 1, 20, 1)),
	QAT_SYM_AEAD_CAP(AES_GCM,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4),
		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 0, 12, 12)),
	QAT_SYM_AEAD_CAP(AES_CCM,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 2),
		CAP_RNG(aad_size, 0, 224, 1), CAP_RNG(iv_size, 7, 13, 1)),
	QAT_SYM_AUTH_CAP(AES_GMAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4),
		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 0, 12, 12)),
	QAT_SYM_AUTH_CAP(AES_CMAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA224,
		CAP_SET(block_size, 64),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA256,
		CAP_SET(block_size, 64),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA384,
		CAP_SET(block_size, 128),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 48, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA512,
		CAP_SET(block_size, 128),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA1_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA224_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA256_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA384_HMAC,
		CAP_SET(block_size, 128),
		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 48, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA512_HMAC,
		CAP_SET(block_size, 128),
		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(MD5_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 16, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(AES_XCBC_MAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SNOW3G_UIA2,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_AUTH_CAP(KASUMI_F9,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(NULL,
		CAP_SET(block_size, 1),
		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(digest_size),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_CIPHER_CAP(AES_CBC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(AES_CTR,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(AES_XTS,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 32, 64, 32), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(AES_DOCSISBPI,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 16), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(SNOW3G_UEA2,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(KASUMI_F8,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(NULL,
		CAP_SET(block_size, 1),
		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_CIPHER_CAP(3DES_CBC,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(3DES_CTR,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 16, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(DES_CBC,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(DES_DOCSISBPI,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 8, 8, 0), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(ZUC_EEA3,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_AUTH_CAP(ZUC_EIA3,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_AEAD_CAP(CHACHA20_POLY1305,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 32, 32, 0),
		CAP_RNG(digest_size, 16, 16, 0),
		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 12, 12, 0)),
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

static struct qat_capabilities_info
qat_sym_crypto_cap_get_gen3(struct qat_pci_device *qat_dev __rte_unused)
{
	struct qat_capabilities_info capa_info;
	capa_info.data = qat_sym_crypto_caps_gen3;
	capa_info.size = sizeof(qat_sym_crypto_caps_gen3);
	return capa_info;
}

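/*
 * Build the AEAD part of a GEN3 request. For single-pass sessions the
 * AEAD operation is submitted as a cipher request with the AAD and
 * digest addresses filled into the cipher request parameters;
 * otherwise the GEN1 path is reused.
 */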
static __rte_always_inline void
enqueue_one_aead_job_gen3(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	if (ctx->is_single_pass) {
		/* QAT GEN3 uses single pass to treat AEAD as
		 * a cipher operation
		 */
		struct icp_qat_fw_la_cipher_req_params *cipher_param =
			(void *)&req->serv_specif_rqpars;

		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
		cipher_param->cipher_offset = ofs.ofs.cipher.head;
		cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
				ofs.ofs.cipher.tail;

		cipher_param->spc_aad_addr = aad->iova;
		cipher_param->spc_auth_res_addr = digest->iova;

		return;
	}

	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
}

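/*
 * Build the auth part of a GEN3 request. Single-pass AES-GMAC (auth
 * data not exceeding QAT_AES_GMAC_SPC_MAX_SIZE) gets a per-operation
 * cipher content descriptor built in the op cookie and is sent as a
 * CIPHER command; every other case falls back to the GEN1 path.
 */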
static __rte_always_inline void
enqueue_one_auth_job_gen3(struct qat_sym_session *ctx,
	struct qat_sym_op_cookie *cookie,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl;
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	uint32_t ver_key_offset;
	uint32_t auth_data_len = data_len - ofs.ofs.auth.head -
			ofs.ofs.auth.tail;

	if (!ctx->is_single_pass_gmac ||
			(auth_data_len > QAT_AES_GMAC_SPC_MAX_SIZE)) {
		enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs,
				data_len);
		return;
	}

	cipher_cd_ctrl = (void *)&req->cd_ctrl;
	cipher_param = (void *)&req->serv_specif_rqpars;
	ver_key_offset = sizeof(struct icp_qat_hw_auth_setup) +
			ICP_QAT_HW_GALOIS_128_STATE1_SZ +
			ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
			ICP_QAT_HW_GALOIS_E_CTR0_SZ +
			sizeof(struct icp_qat_hw_cipher_config);

	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
		/* AES-GMAC */
		qat_set_cipher_iv(cipher_param, auth_iv, ctx->auth_iv.length,
				req);
	}

	/* Fill separate Content Descriptor for this op */
	rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
			ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
				ctx->cd.cipher.key :
				RTE_PTR_ADD(&ctx->cd, ver_key_offset),
			ctx->auth_key_length);
	cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
				ICP_QAT_HW_CIPHER_AEAD_MODE,
				ctx->qat_cipher_alg,
				ICP_QAT_HW_CIPHER_NO_CONVERT,
				(ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
					ICP_QAT_HW_CIPHER_ENCRYPT :
					ICP_QAT_HW_CIPHER_DECRYPT));
	QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
			ctx->digest_length,
			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
	cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(auth_data_len);

	/* Update the request */
	req->cd_pars.u.s.content_desc_addr =
			cookie->opt.spc_gmac.cd_phys_addr;
	req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
			sizeof(struct icp_qat_hw_cipher_config) +
			ctx->auth_key_length, 8) >> 3;
	req->comn_mid.src_length = data_len;
	req->comn_mid.dst_length = 0;

	cipher_param->spc_aad_addr = 0;
	cipher_param->spc_auth_res_addr = digest->iova;
	cipher_param->spc_aad_sz = auth_data_len;
	cipher_param->reserved = 0;
	cipher_param->spc_auth_res_sz = ctx->digest_length;

	req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
	cipher_cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
			req->comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
	ICP_QAT_FW_LA_PROTO_SET(
			req->comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_NO_PROTO);
}

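/*
 * Convert an AEAD rte_crypto_op into SGL/IV/AAD/digest vectors and
 * build the corresponding GEN3 firmware request.
 */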
static int
qat_sym_build_op_aead_gen3(void *in_op, struct qat_sym_session *ctx,
		uint8_t *out_msg, void *op_cookie)
{
	register struct icp_qat_fw_la_bulk_req *req;
	struct rte_crypto_op *op = in_op;
	struct qat_sym_op_cookie *cookie = op_cookie;
	struct rte_crypto_sgl in_sgl, out_sgl;
	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
			out_vec[QAT_SYM_SGL_MAX_NUMBER];
	struct rte_crypto_va_iova_ptr cipher_iv;
	struct rte_crypto_va_iova_ptr aad;
	struct rte_crypto_va_iova_ptr digest;
	union rte_crypto_sym_ofs ofs;
	int32_t total_len;

	in_sgl.vec = in_vec;
	out_sgl.vec = out_vec;

	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
			&cipher_iv, &aad, &digest);
	if (unlikely(ofs.raw == UINT64_MAX)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
	if (unlikely(total_len < 0)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	enqueue_one_aead_job_gen3(ctx, req, &cipher_iv, &digest, &aad, ofs,
		total_len);

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
			NULL, &aad, &digest);
#endif

	return 0;
}

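/*
 * Convert an auth-only rte_crypto_op into SGL/IV/digest vectors and
 * build the corresponding GEN3 firmware request (single-pass GMAC).
 */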
static int
qat_sym_build_op_auth_gen3(void *in_op, struct qat_sym_session *ctx,
		uint8_t *out_msg, void *op_cookie)
{
	register struct icp_qat_fw_la_bulk_req *req;
	struct rte_crypto_op *op = in_op;
	struct qat_sym_op_cookie *cookie = op_cookie;
	struct rte_crypto_sgl in_sgl, out_sgl;
	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
			out_vec[QAT_SYM_SGL_MAX_NUMBER];
	struct rte_crypto_va_iova_ptr auth_iv;
	struct rte_crypto_va_iova_ptr digest;
	union rte_crypto_sym_ofs ofs;
	int32_t total_len;

	in_sgl.vec = in_vec;
	out_sgl.vec = out_vec;

	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

	ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
			NULL, &auth_iv, &digest);
	if (unlikely(ofs.raw == UINT64_MAX)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
	if (unlikely(total_len < 0)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	enqueue_one_auth_job_gen3(ctx, cookie, req, &digest, &auth_iv,
			ofs, total_len);

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
			&auth_iv, NULL, &digest);
#endif

	return 0;
}

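/*
 * GEN3 session setup: run the GEN1 setup first, then install the GEN3
 * build_request handlers for single-pass sessions and enable the
 * extended hash flags for mixed cipher/hash combinations that GEN1
 * rejects with -ENOTSUP.
 */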
static int
qat_sym_crypto_set_session_gen3(void *cdev, void *session)
{
	struct qat_sym_session *ctx = session;
	enum rte_proc_type_t proc_type = rte_eal_process_type();
	int ret;

	if (proc_type == RTE_PROC_AUTO || proc_type == RTE_PROC_INVALID)
		return -EINVAL;

	ret = qat_sym_crypto_set_session_gen1(cdev, session);
	/* Use the GEN3-specific single pass build request handlers */
	if (ctx->is_single_pass)
		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen3;
	else if (ctx->is_single_pass_gmac)
		ctx->build_request[proc_type] = qat_sym_build_op_auth_gen3;

	if (ret == -ENOTSUP) {
		/* GEN1 returns -ENOTSUP for the mixed cipher/hash
		 * combinations it cannot handle; GEN3 supports them,
		 * so set the extended hash flags and clear the error.
		 */
		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
				ctx->qat_cipher_alg !=
				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
			qat_sym_session_set_ext_hash_flags_gen2(ctx,
				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
				ctx->qat_cipher_alg !=
				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
			qat_sym_session_set_ext_hash_flags_gen2(ctx,
				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
		} else if ((ctx->aes_cmac ||
				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
				(ctx->qat_cipher_alg ==
				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
				ctx->qat_cipher_alg ==
				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
		}

		ret = 0;
	}

	return ret;
}

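/* Raw data-path API: enqueue one AEAD operation. */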
static int
qat_sym_dp_enqueue_single_aead_gen3(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_op_cookie *cookie;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;

	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	cookie = qp->op_cookies[tail >> tx_queue->trailz];
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
			data, n_data_vecs, NULL, 0);
	if (unlikely(data_len < 0))
		return -1;

	enqueue_one_aead_job_gen3(ctx, req, iv, digest, aad, ofs,
		(uint32_t)data_len);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
			NULL, aad, digest);
#endif
	return 0;
}

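/*
 * Raw data-path API: enqueue a burst of AEAD operations described by
 * an rte_crypto_sym_vec; returns the number of operations enqueued.
 */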
static uint32_t
qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		struct qat_sym_op_cookie *cookie =
			qp->op_cookies[tail >> tx_queue->trailz];

		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		if (vec->dest_sgl) {
			data_len = qat_sym_build_req_set_data(req,
				user_data[i], cookie,
				vec->src_sgl[i].vec, vec->src_sgl[i].num,
				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
		} else {
			data_len = qat_sym_build_req_set_data(req,
				user_data[i], cookie,
				vec->src_sgl[i].vec,
				vec->src_sgl[i].num, NULL, 0);
		}

		if (unlikely(data_len < 0))
			break;

		enqueue_one_aead_job_gen3(ctx, req, &vec->iv[i],
				&vec->digest[i], &vec->aad[i], ofs,
				(uint32_t)data_len);

		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
				vec->src_sgl[i].num, &vec->iv[i], NULL,
				&vec->aad[i], &vec->digest[i]);
#endif
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}

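/* Raw data-path API: enqueue one authentication operation. */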
static int
qat_sym_dp_enqueue_single_auth_gen3(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv __rte_unused,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_op_cookie *cookie;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	cookie = qp->op_cookies[tail >> tx_queue->trailz];
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;

	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
			data, n_data_vecs, NULL, 0);
	if (unlikely(data_len < 0))
		return -1;

	enqueue_one_auth_job_gen3(ctx, cookie, req, digest, auth_iv, ofs,
			(uint32_t)data_len);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	return 0;
}

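/*
 * Raw data-path API: enqueue a burst of authentication operations;
 * returns the number of operations enqueued.
 */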
static uint32_t
qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		struct qat_sym_op_cookie *cookie =
			qp->op_cookies[tail >> tx_queue->trailz];

		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		if (vec->dest_sgl) {
			data_len = qat_sym_build_req_set_data(req,
				user_data[i], cookie,
				vec->src_sgl[i].vec, vec->src_sgl[i].num,
				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
		} else {
			data_len = qat_sym_build_req_set_data(req,
				user_data[i], cookie,
				vec->src_sgl[i].vec,
				vec->src_sgl[i].num, NULL, 0);
		}

		if (unlikely(data_len < 0))
			break;
		enqueue_one_auth_job_gen3(ctx, cookie, req, &vec->digest[i],
			&vec->auth_iv[i], ofs, (uint32_t)data_len);
		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}

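/*
 * Raw data-path context setup: reuse the GEN1 configuration, then
 * override the enqueue handlers for single-pass AEAD and single-pass
 * GMAC sessions.
 */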
static int
qat_sym_configure_raw_dp_ctx_gen3(void *_raw_dp_ctx, void *_ctx)
{
	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
	struct qat_sym_session *ctx = _ctx;
	int ret;

	ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
	if (ret < 0)
		return ret;

	if (ctx->is_single_pass) {
		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen3;
		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen3;
	} else if (ctx->is_single_pass_gmac) {
		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen3;
		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen3;
	}

	return 0;
}

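/*
 * Register the GEN3 symmetric crypto handlers at startup: cryptodev
 * ops, feature flags and the security context come from GEN1, while
 * capabilities, session setup and raw data-path setup are GEN3
 * specific.
 */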
RTE_INIT(qat_sym_crypto_gen3_init)
{
	qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1;
	qat_sym_gen_dev_ops[QAT_GEN3].get_capabilities =
			qat_sym_crypto_cap_get_gen3;
	qat_sym_gen_dev_ops[QAT_GEN3].get_feature_flags =
			qat_sym_crypto_feature_flags_get_gen1;
	qat_sym_gen_dev_ops[QAT_GEN3].set_session =
			qat_sym_crypto_set_session_gen3;
	qat_sym_gen_dev_ops[QAT_GEN3].set_raw_dp_ctx =
			qat_sym_configure_raw_dp_ctx_gen3;
#ifdef RTE_LIB_SECURITY
	qat_sym_gen_dev_ops[QAT_GEN3].create_security_ctx =
			qat_sym_create_security_gen1;
#endif
}

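/*
 * No GEN3-specific asymmetric crypto implementation is provided;
 * all handlers are left unset.
 */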
RTE_INIT(qat_asym_crypto_gen3_init)
{
	qat_asym_gen_dev_ops[QAT_GEN3].cryptodev_ops = NULL;
	qat_asym_gen_dev_ops[QAT_GEN3].get_capabilities = NULL;
	qat_asym_gen_dev_ops[QAT_GEN3].get_feature_flags = NULL;
	qat_asym_gen_dev_ops[QAT_GEN3].set_session = NULL;
}