/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2022 Intel Corporation
 */

#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include "qat_sym_session.h"
#include "qat_sym.h"
#include "qat_asym.h"
#include "qat_crypto.h"
#include "qat_crypto_pmd_gens.h"

static struct rte_cryptodev_capabilities qat_sym_crypto_legacy_caps_gen3[] = {
	QAT_SYM_CIPHER_CAP(3DES_CBC,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(DES_CBC,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(3DES_CTR,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 16, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_PLAIN_AUTH_CAP(SHA1,
		CAP_SET(block_size, 64),
		CAP_RNG(digest_size, 1, 20, 1)),
	QAT_SYM_AUTH_CAP(SHA224,
		CAP_SET(block_size, 64),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA224_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA1_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(MD5_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 16, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_CIPHER_CAP(DES_DOCSISBPI,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 8, 8, 0), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_PLAIN_AUTH_CAP(SHA3_224,
		CAP_SET(block_size, 144),
		CAP_RNG(digest_size, 28, 28, 0)),
	QAT_SYM_CIPHER_CAP(SM4_ECB,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 0, 0, 0))
};
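
/*
 * Note: CAP_RNG(field, min, max, increment) fills an rte_crypto_param_range,
 * so CAP_RNG(key_size, 8, 24, 8) above advertises 8-, 16- and 24-byte keys
 * and CAP_RNG(iv_size, 8, 8, 0) a fixed 8-byte IV; CAP_RNG_ZERO(field) pins
 * the field to zero. This legacy table is copied ahead of the main one only
 * when qat_legacy_capa is set (see qat_sym_crypto_cap_get_gen3() below).
 */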

static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen3[] = {
	QAT_SYM_AEAD_CAP(AES_GCM,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4),
		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 0, 12, 12)),
	QAT_SYM_AEAD_CAP(AES_CCM,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 2),
		CAP_RNG(aad_size, 0, 224, 1), CAP_RNG(iv_size, 7, 13, 1)),
	QAT_SYM_AUTH_CAP(AES_GMAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4),
		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 0, 12, 12)),
	QAT_SYM_AUTH_CAP(AES_CMAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA256,
		CAP_SET(block_size, 64),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA384,
		CAP_SET(block_size, 128),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 48, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA512,
		CAP_SET(block_size, 128),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_PLAIN_AUTH_CAP(SHA3_256,
		CAP_SET(block_size, 136),
		CAP_RNG(digest_size, 32, 32, 0)),
	QAT_SYM_PLAIN_AUTH_CAP(SHA3_384,
		CAP_SET(block_size, 104),
		CAP_RNG(digest_size, 48, 48, 0)),
	QAT_SYM_PLAIN_AUTH_CAP(SHA3_512,
		CAP_SET(block_size, 72),
		CAP_RNG(digest_size, 64, 64, 0)),
	QAT_SYM_AUTH_CAP(SHA256_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA384_HMAC,
		CAP_SET(block_size, 128),
		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 48, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA512_HMAC,
		CAP_SET(block_size, 128),
		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(AES_XCBC_MAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SNOW3G_UIA2,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_AUTH_CAP(KASUMI_F9,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(NULL,
		CAP_SET(block_size, 1),
		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(digest_size),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_CIPHER_CAP(AES_CBC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(AES_CTR,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(AES_XTS,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 32, 64, 32), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(AES_DOCSISBPI,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 16), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(SNOW3G_UEA2,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(KASUMI_F8,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(NULL,
		CAP_SET(block_size, 1),
		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_CIPHER_CAP(ZUC_EEA3,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_AUTH_CAP(ZUC_EIA3,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_AEAD_CAP(CHACHA20_POLY1305,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 32, 32, 0),
		CAP_RNG(digest_size, 16, 16, 0),
		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 12, 12, 0)),
	QAT_SYM_CIPHER_CAP(SM4_CBC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(SM4_CTR,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_PLAIN_AUTH_CAP(SM3,
		CAP_SET(block_size, 64),
		CAP_RNG(digest_size, 32, 32, 0)),
	QAT_SYM_AUTH_CAP(SM3_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 16, 64, 4), CAP_RNG(digest_size, 32, 32, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
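
/*
 * Only this table carries RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST(): the
 * legacy table above has no terminator of its own because it is only ever
 * copied out ahead of this one, which supplies the sentinel for both.
 */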

static int
check_cipher_capa(const struct rte_cryptodev_capabilities *cap,
		enum rte_crypto_cipher_algorithm algo)
{
	if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
		return 0;
	if (cap->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
		return 0;
	if (cap->sym.cipher.algo != algo)
		return 0;
	return 1;
}

static int
check_auth_capa(const struct rte_cryptodev_capabilities *cap,
		enum rte_crypto_auth_algorithm algo)
{
	if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
		return 0;
	if (cap->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
		return 0;
	if (cap->sym.auth.algo != algo)
		return 0;
	return 1;
}
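
/*
 * Both predicates match only a symmetric capability entry of the given
 * xform type and algorithm; qat_sym_crypto_cap_get_gen3() below uses them
 * to drop SM4/SM3 entries when the corresponding slice is unavailable.
 */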

static int
qat_sym_crypto_cap_get_gen3(struct qat_cryptodev_private *internals,
			const char *capa_memz_name, const uint16_t slice_map)
{
	uint32_t i, iter = 0;
	uint32_t curr_capa = 0;
	uint32_t capa_num, legacy_capa_num;
	uint32_t size = sizeof(qat_sym_crypto_caps_gen3);
	uint32_t legacy_size = sizeof(qat_sym_crypto_legacy_caps_gen3);

	capa_num = size / sizeof(struct rte_cryptodev_capabilities);
	legacy_capa_num = legacy_size / sizeof(struct rte_cryptodev_capabilities);

	if (unlikely(qat_legacy_capa))
		size = size + legacy_size;

	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
	if (internals->capa_mz == NULL) {
		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
				size, rte_socket_id(), 0);
		if (internals->capa_mz == NULL) {
			QAT_LOG(DEBUG,
				"Error allocating memzone for capabilities");
			return -1;
		}
	}

	struct rte_cryptodev_capabilities *addr =
			(struct rte_cryptodev_capabilities *)
				internals->capa_mz->addr;
	struct rte_cryptodev_capabilities *capabilities;

	if (unlikely(qat_legacy_capa)) {
		capabilities = qat_sym_crypto_legacy_caps_gen3;
		capa_num += legacy_capa_num;
	} else {
		capabilities = qat_sym_crypto_caps_gen3;
	}

	for (i = 0; i < capa_num; i++, iter++) {
		if (unlikely(qat_legacy_capa) && (i == legacy_capa_num)) {
			capabilities = qat_sym_crypto_caps_gen3;
			addr += curr_capa;
			curr_capa = 0;
			iter = 0;
		}

		if (slice_map & ICP_ACCEL_MASK_SM4_SLICE && (
			check_cipher_capa(&capabilities[iter],
				RTE_CRYPTO_CIPHER_SM4_ECB) ||
			check_cipher_capa(&capabilities[iter],
				RTE_CRYPTO_CIPHER_SM4_CBC) ||
			check_cipher_capa(&capabilities[iter],
				RTE_CRYPTO_CIPHER_SM4_CTR))) {
			continue;
		}
		if (slice_map & ICP_ACCEL_MASK_SM3_SLICE && (
			check_auth_capa(&capabilities[iter],
				RTE_CRYPTO_AUTH_SM3) ||
			check_auth_capa(&capabilities[iter],
				RTE_CRYPTO_AUTH_SM3_HMAC))) {
			continue;
		}
		memcpy(addr + curr_capa, capabilities + iter,
			sizeof(struct rte_cryptodev_capabilities));
		curr_capa++;
	}
	internals->qat_dev_capabilities = internals->capa_mz->addr;

	return 0;
}
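
/*
 * Resulting memzone layout when qat_legacy_capa is set, assuming no entry
 * is filtered out by the slice checks (a sketch, not to scale):
 *
 *   capa_mz->addr -> [legacy caps][gen3 caps ... END_OF_CAPABILITIES_LIST]
 *
 * A set ICP_ACCEL_MASK_SM4_SLICE/ICP_ACCEL_MASK_SM3_SLICE bit is treated
 * here as "slice not usable", so the matching SM4/SM3 capabilities are
 * skipped rather than copied.
 */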

static __rte_always_inline void
enqueue_one_aead_job_gen3(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	if (ctx->is_single_pass) {
		/* QAT GEN3 uses single pass to treat AEAD as
		 * a cipher operation
		 */
		struct icp_qat_fw_la_cipher_req_params *cipher_param =
			(void *)&req->serv_specif_rqpars;

		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
		cipher_param->cipher_offset = ofs.ofs.cipher.head;
		cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
				ofs.ofs.cipher.tail;

		cipher_param->spc_aad_addr = aad->iova;
		cipher_param->spc_auth_res_addr = digest->iova;

		return;
	}

	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
}
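
/*
 * In single-pass (SPC) mode the AAD and digest IOVAs travel in the cipher
 * request parameters themselves, so the firmware runs AEAD as one cipher
 * pass instead of separate cipher and auth passes; non-single-pass
 * sessions simply fall through to enqueue_one_aead_job_gen1().
 */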

static __rte_always_inline void
enqueue_one_auth_job_gen3(struct qat_sym_session *ctx,
	struct qat_sym_op_cookie *cookie,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl;
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	uint32_t ver_key_offset;
	uint32_t auth_data_len = data_len - ofs.ofs.auth.head -
			ofs.ofs.auth.tail;

	if (!ctx->is_single_pass_gmac ||
			(auth_data_len > QAT_AES_GMAC_SPC_MAX_SIZE)) {
		enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs,
				data_len);
		return;
	}

	cipher_cd_ctrl = (void *)&req->cd_ctrl;
	cipher_param = (void *)&req->serv_specif_rqpars;
	ver_key_offset = sizeof(struct icp_qat_hw_auth_setup) +
			ICP_QAT_HW_GALOIS_128_STATE1_SZ +
			ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
			ICP_QAT_HW_GALOIS_E_CTR0_SZ +
			sizeof(struct icp_qat_hw_cipher_config);

	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
		/* AES-GMAC */
		qat_set_cipher_iv(cipher_param, auth_iv, ctx->auth_iv.length,
				req);
	}

	/* Fill separate Content Descriptor for this op */
	rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
			ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
				ctx->cd.cipher.key :
				RTE_PTR_ADD(&ctx->cd, ver_key_offset),
			ctx->auth_key_length);
	cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
				ICP_QAT_HW_CIPHER_AEAD_MODE,
				ctx->qat_cipher_alg,
				ICP_QAT_HW_CIPHER_NO_CONVERT,
				(ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
					ICP_QAT_HW_CIPHER_ENCRYPT :
					ICP_QAT_HW_CIPHER_DECRYPT));
	QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
			ctx->digest_length,
			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
	cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(auth_data_len);

	/* Update the request */
	req->cd_pars.u.s.content_desc_addr =
			cookie->opt.spc_gmac.cd_phys_addr;
	req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
			sizeof(struct icp_qat_hw_cipher_config) +
			ctx->auth_key_length, 8) >> 3;
	req->comn_mid.src_length = data_len;
	req->comn_mid.dst_length = 0;

	cipher_param->spc_aad_addr = 0;
	cipher_param->spc_auth_res_addr = digest->iova;
	cipher_param->spc_aad_sz = auth_data_len;
	cipher_param->reserved = 0;
	cipher_param->spc_auth_res_sz = ctx->digest_length;

	req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
	cipher_cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
			req->comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
	ICP_QAT_FW_LA_PROTO_SET(
			req->comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_NO_PROTO);
}
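
/*
 * For single-pass GMAC the op cookie carries a private content descriptor:
 * the AES key is copied out of the session CD (for verify it sits
 * ver_key_offset bytes in), the cipher config is rebuilt in AEAD mode with
 * the digest length folded into it, and the request is rewritten as a bare
 * CIPHER command whose "AAD" is the data being authenticated. Jobs longer
 * than QAT_AES_GMAC_SPC_MAX_SIZE take the GEN1 path instead.
 */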

static int
qat_sym_build_op_aead_gen3(void *in_op, struct qat_sym_session *ctx,
		uint8_t *out_msg, void *op_cookie)
{
	register struct icp_qat_fw_la_bulk_req *req;
	struct rte_crypto_op *op = in_op;
	struct qat_sym_op_cookie *cookie = op_cookie;
	struct rte_crypto_sgl in_sgl, out_sgl;
	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
			out_vec[QAT_SYM_SGL_MAX_NUMBER];
	struct rte_crypto_va_iova_ptr cipher_iv;
	struct rte_crypto_va_iova_ptr aad;
	struct rte_crypto_va_iova_ptr digest;
	union rte_crypto_sym_ofs ofs;
	int32_t total_len;

	in_sgl.vec = in_vec;
	out_sgl.vec = out_vec;

	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
			&cipher_iv, &aad, &digest);
	if (unlikely(ofs.raw == UINT64_MAX)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
	if (unlikely(total_len < 0)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	enqueue_one_aead_job_gen3(ctx, req, &cipher_iv, &digest, &aad, ofs,
		total_len);

	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
			NULL, &aad, &digest);

	return 0;
}
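
/*
 * qat_sym_build_op_aead_gen3() above and qat_sym_build_op_auth_gen3() below
 * share one contract: on malformed input the op status is set to
 * RTE_CRYPTO_OP_STATUS_INVALID_ARGS and -EINVAL is returned with nothing
 * enqueued; on success the descriptor at out_msg is fully populated and 0
 * is returned.
 */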

static int
qat_sym_build_op_auth_gen3(void *in_op, struct qat_sym_session *ctx,
		uint8_t *out_msg, void *op_cookie)
{
	register struct icp_qat_fw_la_bulk_req *req;
	struct rte_crypto_op *op = in_op;
	struct qat_sym_op_cookie *cookie = op_cookie;
	struct rte_crypto_sgl in_sgl, out_sgl;
	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
			out_vec[QAT_SYM_SGL_MAX_NUMBER];
	struct rte_crypto_va_iova_ptr auth_iv;
	struct rte_crypto_va_iova_ptr digest;
	union rte_crypto_sym_ofs ofs;
	int32_t total_len;

	in_sgl.vec = in_vec;
	out_sgl.vec = out_vec;

	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

	ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
			NULL, &auth_iv, &digest, op_cookie);
	if (unlikely(ofs.raw == UINT64_MAX)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
	if (unlikely(total_len < 0)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	enqueue_one_auth_job_gen3(ctx, cookie, req, &digest, &auth_iv,
			ofs, total_len);

	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
			&auth_iv, NULL, &digest);

	return 0;
}

static int
qat_sym_crypto_set_session_gen3(void *cdev, void *session)
{
	struct qat_sym_session *ctx = session;
	enum rte_proc_type_t proc_type = rte_eal_process_type();
	int ret;

	if (proc_type == RTE_PROC_AUTO || proc_type == RTE_PROC_INVALID)
		return -EINVAL;

	ret = qat_sym_crypto_set_session_gen1(cdev, session);
	/* special single-pass build requests for GEN3 */
	if (ctx->is_single_pass)
		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen3;
	else if (ctx->is_single_pass_gmac)
		ctx->build_request[proc_type] = qat_sym_build_op_auth_gen3;

	if (ret == -ENOTSUP) {
		/* GEN1 returns -ENOTSUP for the mixed hash-cipher
		 * combinations it cannot handle; GEN3 supports them,
		 * so fix up the session and clear the error here.
		 */
		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
				ctx->qat_cipher_alg !=
				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
			qat_sym_session_set_ext_hash_flags_gen2(ctx,
				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
				ctx->qat_cipher_alg !=
				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
			qat_sym_session_set_ext_hash_flags_gen2(ctx,
				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
		} else if ((ctx->aes_cmac ||
				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
				(ctx->qat_cipher_alg ==
				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
				ctx->qat_cipher_alg ==
				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
		}

		ret = 0;
	}

	return ret;
}
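
/*
 * Dispatch resulting from the overrides above (build_request is indexed
 * by EAL process type, so primary and secondary processes each install
 * their own pointer):
 *
 *   ctx->is_single_pass      -> qat_sym_build_op_aead_gen3
 *   ctx->is_single_pass_gmac -> qat_sym_build_op_auth_gen3
 *   otherwise                -> the handler installed by GEN1
 */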

static int
qat_sym_dp_enqueue_single_aead_gen3(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_op_cookie *cookie;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	cookie = qp->op_cookies[tail >> tx_queue->trailz];
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
			data, n_data_vecs, NULL, 0);
	if (unlikely(data_len < 0))
		return -1;

	enqueue_one_aead_job_gen3(ctx, req, iv, digest, aad, ofs,
		(uint32_t)data_len);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
			NULL, aad, digest);

	return 0;
}
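
/*
 * Ring bookkeeping, worked through with illustrative numbers: for 128-byte
 * messages trailz is 7, so "tail >> trailz" converts the byte offset into
 * the matching op-cookie index, and "(tail + msg_size) & modulo_mask"
 * wraps the tail at the ring boundary. The job only becomes visible to the
 * device once the caller commits the cached enqueue count.
 */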

static uint32_t
qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		struct qat_sym_op_cookie *cookie =
			qp->op_cookies[tail >> tx_queue->trailz];

		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		if (vec->dest_sgl) {
			data_len = qat_sym_build_req_set_data(req,
				user_data[i], cookie,
				vec->src_sgl[i].vec, vec->src_sgl[i].num,
				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
		} else {
			data_len = qat_sym_build_req_set_data(req,
				user_data[i], cookie,
				vec->src_sgl[i].vec,
				vec->src_sgl[i].num, NULL, 0);
		}

		if (unlikely(data_len < 0))
			break;

		enqueue_one_aead_job_gen3(ctx, req, &vec->iv[i],
				&vec->digest[i], &vec->aad[i], ofs,
				(uint32_t)data_len);

		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;

		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
				vec->src_sgl[i].num, &vec->iv[i], NULL,
				&vec->aad[i], &vec->digest[i]);
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}
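
/*
 * On a mid-burst failure the loop breaks with i < n: the remaining vectors
 * are marked failed via qat_sym_dp_fill_vec_status(), but the i descriptors
 * already written stay queued, so the partial count is returned with
 * *status left at 0.
 */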

static int
qat_sym_dp_enqueue_single_auth_gen3(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv __rte_unused,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_op_cookie *cookie;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;
	struct rte_crypto_va_iova_ptr null_digest;
	struct rte_crypto_va_iova_ptr *job_digest = digest;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	cookie = qp->op_cookies[tail >> tx_queue->trailz];
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;

	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
			data, n_data_vecs, NULL, 0);
	if (unlikely(data_len < 0))
		return -1;

	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) {
		null_digest.iova = cookie->digest_null_phys_addr;
		job_digest = &null_digest;
	}

	enqueue_one_auth_job_gen3(ctx, cookie, req, job_digest, auth_iv, ofs,
			(uint32_t)data_len);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	return 0;
}
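
/*
 * For ICP_QAT_HW_AUTH_ALGO_NULL the caller's digest pointer is ignored and
 * the cookie's preallocated null-digest buffer (digest_null_phys_addr) is
 * substituted, presumably so the firmware always has a valid IOVA to write
 * its result to.
 */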

static uint32_t
qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	struct rte_crypto_va_iova_ptr null_digest;
	struct rte_crypto_va_iova_ptr *job_digest = NULL;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		struct qat_sym_op_cookie *cookie =
			qp->op_cookies[tail >> tx_queue->trailz];

		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		if (vec->dest_sgl) {
			data_len = qat_sym_build_req_set_data(req,
				user_data[i], cookie,
				vec->src_sgl[i].vec, vec->src_sgl[i].num,
				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
		} else {
			data_len = qat_sym_build_req_set_data(req,
				user_data[i], cookie,
				vec->src_sgl[i].vec,
				vec->src_sgl[i].num, NULL, 0);
		}

		if (unlikely(data_len < 0))
			break;

		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) {
			null_digest.iova = cookie->digest_null_phys_addr;
			job_digest = &null_digest;
		} else {
			job_digest = &vec->digest[i];
		}

		enqueue_one_auth_job_gen3(ctx, cookie, req, job_digest,
			&vec->auth_iv[i], ofs, (uint32_t)data_len);
		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}

static int
qat_sym_configure_raw_dp_ctx_gen3(void *_raw_dp_ctx, void *_ctx)
{
	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
	struct qat_sym_session *ctx = _ctx;
	int ret;

	ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
	if (ret < 0)
		return ret;

	if (ctx->is_single_pass) {
		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen3;
		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen3;
	} else if (ctx->is_single_pass_gmac) {
		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen3;
		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen3;
	}

	return 0;
}
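
/*
 * The GEN1 call fills in the complete raw datapath vtable first; GEN3 then
 * swaps only the enqueue hooks for the two session types it accelerates
 * with single-pass, leaving dequeue and all other paths on the GEN1
 * implementations.
 */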

RTE_INIT(qat_sym_crypto_gen3_init)
{
	qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1;
	qat_sym_gen_dev_ops[QAT_GEN3].get_capabilities =
			qat_sym_crypto_cap_get_gen3;
	qat_sym_gen_dev_ops[QAT_GEN3].get_feature_flags =
			qat_sym_crypto_feature_flags_get_gen1;
	qat_sym_gen_dev_ops[QAT_GEN3].set_session =
			qat_sym_crypto_set_session_gen3;
	qat_sym_gen_dev_ops[QAT_GEN3].set_raw_dp_ctx =
			qat_sym_configure_raw_dp_ctx_gen3;
	qat_sym_gen_dev_ops[QAT_GEN3].create_security_ctx =
			qat_sym_create_security_gen1;
}
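
/*
 * Only get_capabilities, set_session and set_raw_dp_ctx are GEN3-specific
 * above; every other symmetric hook, and the entire asymmetric table below,
 * reuses the GEN1 implementations.
 */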

RTE_INIT(qat_asym_crypto_gen3_init)
{
	qat_asym_gen_dev_ops[QAT_GEN3].cryptodev_ops =
			&qat_asym_crypto_ops_gen1;
	qat_asym_gen_dev_ops[QAT_GEN3].get_capabilities =
			qat_asym_crypto_cap_get_gen1;
	qat_asym_gen_dev_ops[QAT_GEN3].get_feature_flags =
			qat_asym_crypto_feature_flags_get_gen1;
	qat_asym_gen_dev_ops[QAT_GEN3].set_session =
			qat_asym_crypto_set_session_gen1;
}