/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2022 Intel Corporation
 */

#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include "qat_sym_session.h"
#include "qat_sym.h"
#include "qat_asym.h"
#include "qat_crypto.h"
#include "qat_crypto_pmd_gens.h"

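/*
 * Symmetric crypto capabilities advertised for QAT GEN3 devices. Each
 * entry describes one algorithm together with its supported key, digest,
 * AAD and IV size ranges. The table is copied into a per-device memzone
 * (and filtered against the hardware slice map) by
 * qat_sym_crypto_cap_get_gen3() below.
 */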
static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen3[] = {
	QAT_SYM_PLAIN_AUTH_CAP(SHA1,
		CAP_SET(block_size, 64),
		CAP_RNG(digest_size, 1, 20, 1)),
	QAT_SYM_AEAD_CAP(AES_GCM,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4),
		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 0, 12, 12)),
	QAT_SYM_AEAD_CAP(AES_CCM,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 2),
		CAP_RNG(aad_size, 0, 224, 1), CAP_RNG(iv_size, 7, 13, 1)),
	QAT_SYM_AUTH_CAP(AES_GMAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4),
		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 0, 12, 12)),
	QAT_SYM_AUTH_CAP(AES_CMAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA224,
		CAP_SET(block_size, 64),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA256,
		CAP_SET(block_size, 64),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA384,
		CAP_SET(block_size, 128),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 48, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA512,
		CAP_SET(block_size, 128),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA1_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA224_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA256_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA384_HMAC,
		CAP_SET(block_size, 128),
		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 48, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA512_HMAC,
		CAP_SET(block_size, 128),
		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(MD5_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 16, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(AES_XCBC_MAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SNOW3G_UIA2,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_AUTH_CAP(KASUMI_F9,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(NULL,
		CAP_SET(block_size, 1),
		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(digest_size),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_CIPHER_CAP(AES_CBC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(AES_CTR,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(AES_XTS,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 32, 64, 32), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(AES_DOCSISBPI,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 16), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(SNOW3G_UEA2,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(KASUMI_F8,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(NULL,
		CAP_SET(block_size, 1),
		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_CIPHER_CAP(3DES_CBC,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(3DES_CTR,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 16, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(DES_CBC,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(DES_DOCSISBPI,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 8, 8, 0), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(ZUC_EEA3,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_AUTH_CAP(ZUC_EIA3,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_AEAD_CAP(CHACHA20_POLY1305,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 32, 32, 0),
		CAP_RNG(digest_size, 16, 16, 0),
		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 12, 12, 0)),
	QAT_SYM_CIPHER_CAP(SM4_ECB,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 0, 0, 0)),
	QAT_SYM_CIPHER_CAP(SM4_CBC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(SM4_CTR,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

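/*
 * Capability-matching helpers used when filtering the table above:
 * return 1 when the entry is a symmetric cipher/auth capability for the
 * requested algorithm, 0 otherwise.
 */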
static int
check_cipher_capa(const struct rte_cryptodev_capabilities *cap,
		enum rte_crypto_cipher_algorithm algo)
{
	if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
		return 0;
	if (cap->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
		return 0;
	if (cap->sym.cipher.algo != algo)
		return 0;
	return 1;
}

static int
check_auth_capa(const struct rte_cryptodev_capabilities *cap,
		enum rte_crypto_auth_algorithm algo)
{
	if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
		return 0;
	if (cap->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
		return 0;
	if (cap->sym.auth.algo != algo)
		return 0;
	return 1;
}

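/*
 * Build the capability array advertised for this device: copy the GEN3
 * table into the capability memzone, skipping SM4 (and SM3) entries when
 * the corresponding accelerator slice bit is set in slice_map, i.e. when
 * that slice is not usable on this particular device.
 */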
static int
qat_sym_crypto_cap_get_gen3(struct qat_cryptodev_private *internals,
			const char *capa_memz_name, const uint16_t slice_map)
{
	const uint32_t size = sizeof(qat_sym_crypto_caps_gen3);
	uint32_t i;

	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
	if (internals->capa_mz == NULL) {
		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
				size, rte_socket_id(), 0);
		if (internals->capa_mz == NULL) {
			QAT_LOG(DEBUG,
				"Error allocating memzone for capabilities");
			return -1;
		}
	}

	struct rte_cryptodev_capabilities *addr =
			(struct rte_cryptodev_capabilities *)
				internals->capa_mz->addr;
	const struct rte_cryptodev_capabilities *capabilities =
		qat_sym_crypto_caps_gen3;
	const uint32_t capa_num =
		size / sizeof(struct rte_cryptodev_capabilities);
	uint32_t curr_capa = 0;

	for (i = 0; i < capa_num; i++) {
		if (slice_map & ICP_ACCEL_MASK_SM4_SLICE && (
			check_cipher_capa(&capabilities[i],
				RTE_CRYPTO_CIPHER_SM4_ECB) ||
			check_cipher_capa(&capabilities[i],
				RTE_CRYPTO_CIPHER_SM4_CBC) ||
			check_cipher_capa(&capabilities[i],
				RTE_CRYPTO_CIPHER_SM4_CTR))) {
			continue;
		}
		if (slice_map & ICP_ACCEL_MASK_SM3_SLICE && (
			check_auth_capa(&capabilities[i],
				RTE_CRYPTO_AUTH_SM3))) {
			continue;
		}
		memcpy(addr + curr_capa, capabilities + i,
			sizeof(struct rte_cryptodev_capabilities));
		curr_capa++;
	}
	internals->qat_dev_capabilities = internals->capa_mz->addr;

	return 0;
}

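/*
 * AEAD request builder. When the session uses the GEN3 single-pass (SPC)
 * path, the AEAD operation is programmed as a cipher request and the
 * AAD/digest addresses are written directly into the cipher request
 * parameters; otherwise the request is built exactly as on GEN1.
 */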
static __rte_always_inline void
enqueue_one_aead_job_gen3(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	if (ctx->is_single_pass) {
		/* QAT GEN3 uses single pass to treat AEAD as
		 * a cipher operation
		 */
		struct icp_qat_fw_la_cipher_req_params *cipher_param =
			(void *)&req->serv_specif_rqpars;

		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
		cipher_param->cipher_offset = ofs.ofs.cipher.head;
		cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
				ofs.ofs.cipher.tail;

		cipher_param->spc_aad_addr = aad->iova;
		cipher_param->spc_auth_res_addr = digest->iova;

		return;
	}

	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
}

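/*
 * Auth request builder. Single-pass AES-GMAC requests small enough for
 * the SPC path (auth_data_len <= QAT_AES_GMAC_SPC_MAX_SIZE) are turned
 * into a single-pass cipher request using a per-operation content
 * descriptor built in the op cookie; everything else falls back to the
 * GEN1 builder.
 */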
static __rte_always_inline void
enqueue_one_auth_job_gen3(struct qat_sym_session *ctx,
	struct qat_sym_op_cookie *cookie,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl;
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	uint32_t ver_key_offset;
	uint32_t auth_data_len = data_len - ofs.ofs.auth.head -
			ofs.ofs.auth.tail;

	if (!ctx->is_single_pass_gmac ||
			(auth_data_len > QAT_AES_GMAC_SPC_MAX_SIZE)) {
		enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs,
				data_len);
		return;
	}

	cipher_cd_ctrl = (void *)&req->cd_ctrl;
	cipher_param = (void *)&req->serv_specif_rqpars;
	ver_key_offset = sizeof(struct icp_qat_hw_auth_setup) +
			ICP_QAT_HW_GALOIS_128_STATE1_SZ +
			ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
			ICP_QAT_HW_GALOIS_E_CTR0_SZ +
			sizeof(struct icp_qat_hw_cipher_config);

	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
		/* AES-GMAC */
		qat_set_cipher_iv(cipher_param, auth_iv, ctx->auth_iv.length,
				req);
	}

	/* Fill separate Content Descriptor for this op */
	rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
			ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
				ctx->cd.cipher.key :
				RTE_PTR_ADD(&ctx->cd, ver_key_offset),
			ctx->auth_key_length);
	cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
				ICP_QAT_HW_CIPHER_AEAD_MODE,
				ctx->qat_cipher_alg,
				ICP_QAT_HW_CIPHER_NO_CONVERT,
				(ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
					ICP_QAT_HW_CIPHER_ENCRYPT :
					ICP_QAT_HW_CIPHER_DECRYPT));
	QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
			ctx->digest_length,
			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
	cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(auth_data_len);

	/* Update the request */
	req->cd_pars.u.s.content_desc_addr =
			cookie->opt.spc_gmac.cd_phys_addr;
	req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
			sizeof(struct icp_qat_hw_cipher_config) +
			ctx->auth_key_length, 8) >> 3;
	req->comn_mid.src_length = data_len;
	req->comn_mid.dst_length = 0;

	cipher_param->spc_aad_addr = 0;
	cipher_param->spc_auth_res_addr = digest->iova;
	cipher_param->spc_aad_sz = auth_data_len;
	cipher_param->reserved = 0;
	cipher_param->spc_auth_res_sz = ctx->digest_length;

	req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
	cipher_cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
			req->comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
	ICP_QAT_FW_LA_PROTO_SET(
			req->comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_NO_PROTO);
}

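/*
 * Cryptodev (rte_crypto_op) build handler for AEAD on GEN3: convert the
 * op into vectors, populate the firmware request from the session
 * template and hand it to enqueue_one_aead_job_gen3().
 */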
static int
qat_sym_build_op_aead_gen3(void *in_op, struct qat_sym_session *ctx,
		uint8_t *out_msg, void *op_cookie)
{
	register struct icp_qat_fw_la_bulk_req *req;
	struct rte_crypto_op *op = in_op;
	struct qat_sym_op_cookie *cookie = op_cookie;
	struct rte_crypto_sgl in_sgl, out_sgl;
	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
			out_vec[QAT_SYM_SGL_MAX_NUMBER];
	struct rte_crypto_va_iova_ptr cipher_iv;
	struct rte_crypto_va_iova_ptr aad;
	struct rte_crypto_va_iova_ptr digest;
	union rte_crypto_sym_ofs ofs;
	int32_t total_len;

	in_sgl.vec = in_vec;
	out_sgl.vec = out_vec;

	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
			&cipher_iv, &aad, &digest);
	if (unlikely(ofs.raw == UINT64_MAX)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
	if (unlikely(total_len < 0)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	enqueue_one_aead_job_gen3(ctx, req, &cipher_iv, &digest, &aad, ofs,
		total_len);

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
			NULL, &aad, &digest);
#endif

	return 0;
}

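/*
 * Cryptodev (rte_crypto_op) build handler for auth-only operations on
 * GEN3, mirroring the AEAD builder above but feeding
 * enqueue_one_auth_job_gen3().
 */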
static int
qat_sym_build_op_auth_gen3(void *in_op, struct qat_sym_session *ctx,
		uint8_t *out_msg, void *op_cookie)
{
	register struct icp_qat_fw_la_bulk_req *req;
	struct rte_crypto_op *op = in_op;
	struct qat_sym_op_cookie *cookie = op_cookie;
	struct rte_crypto_sgl in_sgl, out_sgl;
	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
			out_vec[QAT_SYM_SGL_MAX_NUMBER];
	struct rte_crypto_va_iova_ptr auth_iv;
	struct rte_crypto_va_iova_ptr digest;
	union rte_crypto_sym_ofs ofs;
	int32_t total_len;

	in_sgl.vec = in_vec;
	out_sgl.vec = out_vec;

	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

	ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
			NULL, &auth_iv, &digest);
	if (unlikely(ofs.raw == UINT64_MAX)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
	if (unlikely(total_len < 0)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	enqueue_one_auth_job_gen3(ctx, cookie, req, &digest, &auth_iv,
			ofs, total_len);

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
			&auth_iv, NULL, &digest);
#endif

	return 0;
}

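/*
 * Session setup: reuse the GEN1 session logic, then override the build
 * handler for single-pass AEAD/GMAC sessions and enable the mixed
 * cipher/hash combinations that GEN1 rejects.
 */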
static int
qat_sym_crypto_set_session_gen3(void *cdev, void *session)
{
	struct qat_sym_session *ctx = session;
	enum rte_proc_type_t proc_type = rte_eal_process_type();
	int ret;

	if (proc_type == RTE_PROC_AUTO || proc_type == RTE_PROC_INVALID)
		return -EINVAL;

	ret = qat_sym_crypto_set_session_gen1(cdev, session);
	/* special single-pass build request for GEN3 */
	if (ctx->is_single_pass)
		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen3;
	else if (ctx->is_single_pass_gmac)
		ctx->build_request[proc_type] = qat_sym_build_op_auth_gen3;

	if (ret == -ENOTSUP) {
		/* GEN1 returns -ENOTSUP for mixed cipher/auth algorithm
		 * combinations it cannot handle; GEN3 supports them, so set
		 * the extended hash flags here and clear the error.
		 */
		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
				ctx->qat_cipher_alg !=
				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
			qat_sym_session_set_ext_hash_flags_gen2(ctx,
				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
				ctx->qat_cipher_alg !=
				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
			qat_sym_session_set_ext_hash_flags_gen2(ctx,
				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
		} else if ((ctx->aes_cmac ||
				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
				(ctx->qat_cipher_alg ==
				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
				ctx->qat_cipher_alg ==
				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
		}

		ret = 0;
	}

	return ret;
}

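/*
 * Raw data-path (rte_crypto_raw_dp_ctx) enqueue of a single AEAD job.
 * The request is built in place on the TX ring at the cached tail; the
 * tail and the cached enqueue count are updated locally and the doorbell
 * is rung later through the raw data-path enqueue_done callback.
 */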
static int
qat_sym_dp_enqueue_single_aead_gen3(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_op_cookie *cookie;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;

	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	cookie = qp->op_cookies[tail >> tx_queue->trailz];
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
			data, n_data_vecs, NULL, 0);
	if (unlikely(data_len < 0))
		return -1;

	enqueue_one_aead_job_gen3(ctx, req, iv, digest, aad, ofs,
		(uint32_t)data_len);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
			NULL, aad, digest);
#endif
	return 0;
}

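/*
 * Raw data-path burst enqueue for single-pass AEAD: build one firmware
 * request per source vector, up to the free space reported by
 * QAT_SYM_DP_GET_MAX_ENQ(), and mark any remaining vectors as failed if
 * a request cannot be built.
 */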
static uint32_t
qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		struct qat_sym_op_cookie *cookie =
			qp->op_cookies[tail >> tx_queue->trailz];

		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		if (vec->dest_sgl) {
			data_len = qat_sym_build_req_set_data(req,
				user_data[i], cookie,
				vec->src_sgl[i].vec, vec->src_sgl[i].num,
				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
		} else {
			data_len = qat_sym_build_req_set_data(req,
				user_data[i], cookie,
				vec->src_sgl[i].vec,
				vec->src_sgl[i].num, NULL, 0);
		}

		if (unlikely(data_len < 0))
			break;

		enqueue_one_aead_job_gen3(ctx, req, &vec->iv[i],
				&vec->digest[i], &vec->aad[i], ofs,
				(uint32_t)data_len);

		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
				vec->src_sgl[i].num, &vec->iv[i], NULL,
				&vec->aad[i], &vec->digest[i]);
#endif
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}

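/*
 * Raw data-path enqueue of a single auth job (single-pass GMAC capable);
 * same ring handling as the AEAD variant above, but routed through
 * enqueue_one_auth_job_gen3().
 */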
static int
qat_sym_dp_enqueue_single_auth_gen3(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv __rte_unused,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_op_cookie *cookie;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	cookie = qp->op_cookies[tail >> tx_queue->trailz];
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;

	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
			data, n_data_vecs, NULL, 0);
	if (unlikely(data_len < 0))
		return -1;

	enqueue_one_auth_job_gen3(ctx, cookie, req, digest, auth_iv, ofs,
			(uint32_t)data_len);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	return 0;
}

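/*
 * Raw data-path burst enqueue for auth jobs, the auth counterpart of
 * qat_sym_dp_enqueue_aead_jobs_gen3() above.
 */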
static uint32_t
qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		struct qat_sym_op_cookie *cookie =
			qp->op_cookies[tail >> tx_queue->trailz];

		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		if (vec->dest_sgl) {
			data_len = qat_sym_build_req_set_data(req,
				user_data[i], cookie,
				vec->src_sgl[i].vec, vec->src_sgl[i].num,
				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
		} else {
			data_len = qat_sym_build_req_set_data(req,
				user_data[i], cookie,
				vec->src_sgl[i].vec,
				vec->src_sgl[i].num, NULL, 0);
		}

		if (unlikely(data_len < 0))
			break;
		enqueue_one_auth_job_gen3(ctx, cookie, req, &vec->digest[i],
			&vec->auth_iv[i], ofs, (uint32_t)data_len);
		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}

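/*
 * Raw data-path context setup, reached through
 * rte_cryptodev_configure_raw_dp_ctx(): start from the GEN1 handlers and
 * swap in the GEN3 single-pass enqueue callbacks when the session uses
 * single-pass AEAD or single-pass GMAC.
 */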
static int
qat_sym_configure_raw_dp_ctx_gen3(void *_raw_dp_ctx, void *_ctx)
{
	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
	struct qat_sym_session *ctx = _ctx;
	int ret;

	ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
	if (ret < 0)
		return ret;

	if (ctx->is_single_pass) {
		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen3;
		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen3;
	} else if (ctx->is_single_pass_gmac) {
		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen3;
		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen3;
	}

	return 0;
}

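/*
 * Constructor-time registration of the GEN3 hooks into the generation
 * dispatch tables. Symmetric crypto mostly reuses the GEN1 handlers,
 * overriding only capability query, session setup and the raw data-path
 * context; the asymmetric entries are left NULL for GEN3.
 */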
RTE_INIT(qat_sym_crypto_gen3_init)
{
	qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1;
	qat_sym_gen_dev_ops[QAT_GEN3].get_capabilities =
			qat_sym_crypto_cap_get_gen3;
	qat_sym_gen_dev_ops[QAT_GEN3].get_feature_flags =
			qat_sym_crypto_feature_flags_get_gen1;
	qat_sym_gen_dev_ops[QAT_GEN3].set_session =
			qat_sym_crypto_set_session_gen3;
	qat_sym_gen_dev_ops[QAT_GEN3].set_raw_dp_ctx =
			qat_sym_configure_raw_dp_ctx_gen3;
#ifdef RTE_LIB_SECURITY
	qat_sym_gen_dev_ops[QAT_GEN3].create_security_ctx =
			qat_sym_create_security_gen1;
#endif
}

RTE_INIT(qat_asym_crypto_gen3_init)
{
	qat_asym_gen_dev_ops[QAT_GEN3].cryptodev_ops = NULL;
	qat_asym_gen_dev_ops[QAT_GEN3].get_capabilities = NULL;
	qat_asym_gen_dev_ops[QAT_GEN3].get_feature_flags = NULL;
	qat_asym_gen_dev_ops[QAT_GEN3].set_session = NULL;
}
733