/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2022 Intel Corporation
 */

#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include "qat_sym_session.h"
#include "qat_sym.h"
#include "qat_asym.h"
#include "qat_crypto.h"
#include "qat_crypto_pmd_gens.h"

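/*
 * Symmetric crypto capabilities advertised by QAT GEN3 devices. Each
 * entry describes the key, digest, AAD and IV size ranges supported
 * for one algorithm; the array is terminated by
 * RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST().
 */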
static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen3[] = {
	QAT_SYM_PLAIN_AUTH_CAP(SHA1,
		CAP_SET(block_size, 64),
		CAP_RNG(digest_size, 1, 20, 1)),
	QAT_SYM_AEAD_CAP(AES_GCM,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4),
		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 0, 12, 12)),
	QAT_SYM_AEAD_CAP(AES_CCM,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 2),
		CAP_RNG(aad_size, 0, 224, 1), CAP_RNG(iv_size, 7, 13, 1)),
	QAT_SYM_AUTH_CAP(AES_GMAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4),
		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 0, 12, 12)),
	QAT_SYM_AUTH_CAP(AES_CMAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA224,
		CAP_SET(block_size, 64),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA256,
		CAP_SET(block_size, 64),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA384,
		CAP_SET(block_size, 128),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 48, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA512,
		CAP_SET(block_size, 128),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_PLAIN_AUTH_CAP(SHA3_224,
		CAP_SET(block_size, 144),
		CAP_RNG(digest_size, 28, 28, 0)),
	QAT_SYM_PLAIN_AUTH_CAP(SHA3_256,
		CAP_SET(block_size, 136),
		CAP_RNG(digest_size, 32, 32, 0)),
	QAT_SYM_PLAIN_AUTH_CAP(SHA3_384,
		CAP_SET(block_size, 104),
		CAP_RNG(digest_size, 48, 48, 0)),
	QAT_SYM_PLAIN_AUTH_CAP(SHA3_512,
		CAP_SET(block_size, 72),
		CAP_RNG(digest_size, 64, 64, 0)),
	QAT_SYM_AUTH_CAP(SHA1_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA224_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA256_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA384_HMAC,
		CAP_SET(block_size, 128),
		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 48, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA512_HMAC,
		CAP_SET(block_size, 128),
		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(MD5_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 16, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(AES_XCBC_MAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SNOW3G_UIA2,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_AUTH_CAP(KASUMI_F9,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(NULL,
		CAP_SET(block_size, 1),
		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(digest_size),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_CIPHER_CAP(AES_CBC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(AES_CTR,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(AES_XTS,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 32, 64, 32), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(AES_DOCSISBPI,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 16), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(SNOW3G_UEA2,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(KASUMI_F8,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(NULL,
		CAP_SET(block_size, 1),
		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_CIPHER_CAP(3DES_CBC,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(3DES_CTR,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 16, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(DES_CBC,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(DES_DOCSISBPI,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 8, 8, 0), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(ZUC_EEA3,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_AUTH_CAP(ZUC_EIA3,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_AEAD_CAP(CHACHA20_POLY1305,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 32, 32, 0),
		CAP_RNG(digest_size, 16, 16, 0),
		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 12, 12, 0)),
	QAT_SYM_CIPHER_CAP(SM4_ECB,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 0, 0, 0)),
	QAT_SYM_CIPHER_CAP(SM4_CBC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(SM4_CTR,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

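/*
 * Helpers used when filtering the capability table: return 1 when the
 * entry is a symmetric cipher/auth capability for the given algorithm,
 * 0 otherwise.
 */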
static int
check_cipher_capa(const struct rte_cryptodev_capabilities *cap,
		enum rte_crypto_cipher_algorithm algo)
{
	if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
		return 0;
	if (cap->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
		return 0;
	if (cap->sym.cipher.algo != algo)
		return 0;
	return 1;
}

static int
check_auth_capa(const struct rte_cryptodev_capabilities *cap,
		enum rte_crypto_auth_algorithm algo)
{
	if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
		return 0;
	if (cap->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
		return 0;
	if (cap->sym.auth.algo != algo)
		return 0;
	return 1;
}

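/*
 * Build the GEN3 capability array in a memzone so it can be shared
 * between processes. SM4 cipher and SM3 auth entries are skipped when
 * the corresponding slice bit is set in slice_map, i.e. when that
 * accelerator slice is not usable on this device.
 */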
static int
qat_sym_crypto_cap_get_gen3(struct qat_cryptodev_private *internals,
			const char *capa_memz_name, const uint16_t slice_map)
{
	const uint32_t size = sizeof(qat_sym_crypto_caps_gen3);
	uint32_t i;

	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
	if (internals->capa_mz == NULL) {
		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
				size, rte_socket_id(), 0);
		if (internals->capa_mz == NULL) {
			QAT_LOG(DEBUG,
				"Error allocating memzone for capabilities");
			return -1;
		}
	}

	struct rte_cryptodev_capabilities *addr =
			(struct rte_cryptodev_capabilities *)
				internals->capa_mz->addr;
	const struct rte_cryptodev_capabilities *capabilities =
		qat_sym_crypto_caps_gen3;
	const uint32_t capa_num =
		size / sizeof(struct rte_cryptodev_capabilities);
	uint32_t curr_capa = 0;

	for (i = 0; i < capa_num; i++) {
		if (slice_map & ICP_ACCEL_MASK_SM4_SLICE && (
			check_cipher_capa(&capabilities[i],
				RTE_CRYPTO_CIPHER_SM4_ECB) ||
			check_cipher_capa(&capabilities[i],
				RTE_CRYPTO_CIPHER_SM4_CBC) ||
			check_cipher_capa(&capabilities[i],
				RTE_CRYPTO_CIPHER_SM4_CTR))) {
			continue;
		}
		if (slice_map & ICP_ACCEL_MASK_SM3_SLICE && (
			check_auth_capa(&capabilities[i],
				RTE_CRYPTO_AUTH_SM3))) {
			continue;
		}
		memcpy(addr + curr_capa, capabilities + i,
			sizeof(struct rte_cryptodev_capabilities));
		curr_capa++;
	}
	internals->qat_dev_capabilities = internals->capa_mz->addr;

	return 0;
}

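/*
 * AEAD descriptor setup for GEN3. Single-pass capable sessions are
 * programmed as cipher jobs with the AAD and digest addresses written
 * into the single-pass request fields; other sessions fall back to the
 * GEN1 path.
 */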
static __rte_always_inline void
enqueue_one_aead_job_gen3(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	if (ctx->is_single_pass) {
		/* QAT GEN3 uses single pass to treat AEAD as a
		 * cipher operation
		 */
		struct icp_qat_fw_la_cipher_req_params *cipher_param =
			(void *)&req->serv_specif_rqpars;

		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
		cipher_param->cipher_offset = ofs.ofs.cipher.head;
		cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
				ofs.ofs.cipher.tail;

		cipher_param->spc_aad_addr = aad->iova;
		cipher_param->spc_auth_res_addr = digest->iova;

		return;
	}

	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
}

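/*
 * Auth descriptor setup for GEN3. AES-GMAC requests of up to
 * QAT_AES_GMAC_SPC_MAX_SIZE bytes are converted into single-pass
 * cipher jobs using a per-operation content descriptor kept in the op
 * cookie; all other cases fall back to the GEN1 path.
 */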
static __rte_always_inline void
enqueue_one_auth_job_gen3(struct qat_sym_session *ctx,
	struct qat_sym_op_cookie *cookie,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl;
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	uint32_t ver_key_offset;
	uint32_t auth_data_len = data_len - ofs.ofs.auth.head -
			ofs.ofs.auth.tail;

	if (!ctx->is_single_pass_gmac ||
			(auth_data_len > QAT_AES_GMAC_SPC_MAX_SIZE)) {
		enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs,
				data_len);
		return;
	}

	cipher_cd_ctrl = (void *)&req->cd_ctrl;
	cipher_param = (void *)&req->serv_specif_rqpars;
	ver_key_offset = sizeof(struct icp_qat_hw_auth_setup) +
			ICP_QAT_HW_GALOIS_128_STATE1_SZ +
			ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
			ICP_QAT_HW_GALOIS_E_CTR0_SZ +
			sizeof(struct icp_qat_hw_cipher_config);

	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
		/* AES-GMAC */
		qat_set_cipher_iv(cipher_param, auth_iv, ctx->auth_iv.length,
				req);
	}

	/* Fill separate Content Descriptor for this op */
	rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
			ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
				ctx->cd.cipher.key :
				RTE_PTR_ADD(&ctx->cd, ver_key_offset),
			ctx->auth_key_length);
	cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
				ICP_QAT_HW_CIPHER_AEAD_MODE,
				ctx->qat_cipher_alg,
				ICP_QAT_HW_CIPHER_NO_CONVERT,
				(ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
					ICP_QAT_HW_CIPHER_ENCRYPT :
					ICP_QAT_HW_CIPHER_DECRYPT));
	QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
			ctx->digest_length,
			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
	cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(auth_data_len);

	/* Update the request */
	req->cd_pars.u.s.content_desc_addr =
			cookie->opt.spc_gmac.cd_phys_addr;
	req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
			sizeof(struct icp_qat_hw_cipher_config) +
			ctx->auth_key_length, 8) >> 3;
	req->comn_mid.src_length = data_len;
	req->comn_mid.dst_length = 0;

	cipher_param->spc_aad_addr = 0;
	cipher_param->spc_auth_res_addr = digest->iova;
	cipher_param->spc_aad_sz = auth_data_len;
	cipher_param->reserved = 0;
	cipher_param->spc_auth_res_sz = ctx->digest_length;

	req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
	cipher_cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
			req->comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
	ICP_QAT_FW_LA_PROTO_SET(
			req->comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_NO_PROTO);
}

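/*
 * build_request handler installed for single-pass AEAD sessions:
 * convert the crypto op into vector form, fill the bulk request and
 * program the GEN3 AEAD job.
 */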
static int
qat_sym_build_op_aead_gen3(void *in_op, struct qat_sym_session *ctx,
		uint8_t *out_msg, void *op_cookie)
{
	register struct icp_qat_fw_la_bulk_req *req;
	struct rte_crypto_op *op = in_op;
	struct qat_sym_op_cookie *cookie = op_cookie;
	struct rte_crypto_sgl in_sgl, out_sgl;
	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
			out_vec[QAT_SYM_SGL_MAX_NUMBER];
	struct rte_crypto_va_iova_ptr cipher_iv;
	struct rte_crypto_va_iova_ptr aad;
	struct rte_crypto_va_iova_ptr digest;
	union rte_crypto_sym_ofs ofs;
	int32_t total_len;

	in_sgl.vec = in_vec;
	out_sgl.vec = out_vec;

	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
			&cipher_iv, &aad, &digest);
	if (unlikely(ofs.raw == UINT64_MAX)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
	if (unlikely(total_len < 0)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	enqueue_one_aead_job_gen3(ctx, req, &cipher_iv, &digest, &aad, ofs,
		total_len);

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
			NULL, &aad, &digest);
#endif

	return 0;
}

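/*
 * build_request handler installed for single-pass GMAC sessions;
 * mirrors the AEAD builder above but goes through the GEN3 auth job
 * setup.
 */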
static int
qat_sym_build_op_auth_gen3(void *in_op, struct qat_sym_session *ctx,
		uint8_t *out_msg, void *op_cookie)
{
	register struct icp_qat_fw_la_bulk_req *req;
	struct rte_crypto_op *op = in_op;
	struct qat_sym_op_cookie *cookie = op_cookie;
	struct rte_crypto_sgl in_sgl, out_sgl;
	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
			out_vec[QAT_SYM_SGL_MAX_NUMBER];
	struct rte_crypto_va_iova_ptr auth_iv;
	struct rte_crypto_va_iova_ptr digest;
	union rte_crypto_sym_ofs ofs;
	int32_t total_len;

	in_sgl.vec = in_vec;
	out_sgl.vec = out_vec;

	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

	ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
			NULL, &auth_iv, &digest);
	if (unlikely(ofs.raw == UINT64_MAX)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
	if (unlikely(total_len < 0)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	enqueue_one_auth_job_gen3(ctx, cookie, req, &digest, &auth_iv,
			ofs, total_len);

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
			&auth_iv, NULL, &digest);
#endif

	return 0;
}

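/*
 * GEN3 session setup: run the common GEN1 configuration, then install
 * the single-pass build_request handlers where applicable and enable
 * the mixed cipher/hash combinations that GEN1 rejects.
 */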
static int
qat_sym_crypto_set_session_gen3(void *cdev, void *session)
{
	struct qat_sym_session *ctx = session;
	enum rte_proc_type_t proc_type = rte_eal_process_type();
	int ret;

	if (proc_type == RTE_PROC_AUTO || proc_type == RTE_PROC_INVALID)
		return -EINVAL;

	ret = qat_sym_crypto_set_session_gen1(cdev, session);
	/* Special single-pass build request handlers for GEN3 */
	if (ctx->is_single_pass)
		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen3;
	else if (ctx->is_single_pass_gmac)
		ctx->build_request[proc_type] = qat_sym_build_op_auth_gen3;

	if (ret == -ENOTSUP) {
		/* GEN1 returns -ENOTSUP for mixed cipher/hash combinations it
		 * cannot handle; GEN3 supports them, so set the extended hash
		 * flags and clear the error.
		 */
		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
				ctx->qat_cipher_alg !=
				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
			qat_sym_session_set_ext_hash_flags_gen2(ctx,
				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
				ctx->qat_cipher_alg !=
				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
			qat_sym_session_set_ext_hash_flags_gen2(ctx,
				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
		} else if ((ctx->aes_cmac ||
				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
				(ctx->qat_cipher_alg ==
				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
				ctx->qat_cipher_alg ==
				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
		}

		ret = 0;
	}

	return ret;
}

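/*
 * Raw data-path (rte_cryptodev raw API) enqueue helpers. These mirror
 * the GEN1 implementations but build descriptors through the GEN3
 * single-pass AEAD/auth job setup above.
 */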
static int
qat_sym_dp_enqueue_single_aead_gen3(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_op_cookie *cookie;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;

	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	cookie = qp->op_cookies[tail >> tx_queue->trailz];
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
			data, n_data_vecs, NULL, 0);
	if (unlikely(data_len < 0))
		return -1;

	enqueue_one_aead_job_gen3(ctx, req, iv, digest, aad, ofs,
		(uint32_t)data_len);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
			NULL, aad, digest);
#endif
	return 0;
}

static uint32_t
qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		struct qat_sym_op_cookie *cookie =
			qp->op_cookies[tail >> tx_queue->trailz];

		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		if (vec->dest_sgl) {
			data_len = qat_sym_build_req_set_data(req,
				user_data[i], cookie,
				vec->src_sgl[i].vec, vec->src_sgl[i].num,
				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
		} else {
			data_len = qat_sym_build_req_set_data(req,
				user_data[i], cookie,
				vec->src_sgl[i].vec,
				vec->src_sgl[i].num, NULL, 0);
		}

		if (unlikely(data_len < 0))
			break;

		enqueue_one_aead_job_gen3(ctx, req, &vec->iv[i],
				&vec->digest[i], &vec->aad[i], ofs,
				(uint32_t)data_len);

		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
				vec->src_sgl[i].num, &vec->iv[i], NULL,
				&vec->aad[i], &vec->digest[i]);
#endif
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}

static int
qat_sym_dp_enqueue_single_auth_gen3(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv __rte_unused,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_op_cookie *cookie;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	cookie = qp->op_cookies[tail >> tx_queue->trailz];
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;

	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
			data, n_data_vecs, NULL, 0);
	if (unlikely(data_len < 0))
		return -1;

	enqueue_one_auth_job_gen3(ctx, cookie, req, digest, auth_iv, ofs,
			(uint32_t)data_len);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	return 0;
}

static uint32_t
qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		struct qat_sym_op_cookie *cookie =
			qp->op_cookies[tail >> tx_queue->trailz];

		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		if (vec->dest_sgl) {
			data_len = qat_sym_build_req_set_data(req,
				user_data[i], cookie,
				vec->src_sgl[i].vec, vec->src_sgl[i].num,
				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
		} else {
			data_len = qat_sym_build_req_set_data(req,
				user_data[i], cookie,
				vec->src_sgl[i].vec,
				vec->src_sgl[i].num, NULL, 0);
		}

		if (unlikely(data_len < 0))
			break;
		enqueue_one_auth_job_gen3(ctx, cookie, req, &vec->digest[i],
			&vec->auth_iv[i], ofs, (uint32_t)data_len);
		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}

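/*
 * Configure the raw data-path context: start from the GEN1 setup and
 * override the enqueue callbacks for single-pass AEAD/GMAC sessions.
 */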
static int
qat_sym_configure_raw_dp_ctx_gen3(void *_raw_dp_ctx, void *_ctx)
{
	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
	struct qat_sym_session *ctx = _ctx;
	int ret;

	ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
	if (ret < 0)
		return ret;

	if (ctx->is_single_pass) {
		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen3;
		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen3;
	} else if (ctx->is_single_pass_gmac) {
		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen3;
		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen3;
	}

	return 0;
}

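/*
 * Constructor registration of the GEN3 device ops. GEN3 reuses the
 * GEN1 cryptodev ops and feature flags and overrides only capability
 * discovery, session setup and the raw data-path hooks; the
 * asymmetric entry points are left unset.
 */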
RTE_INIT(qat_sym_crypto_gen3_init)
{
	qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1;
	qat_sym_gen_dev_ops[QAT_GEN3].get_capabilities =
			qat_sym_crypto_cap_get_gen3;
	qat_sym_gen_dev_ops[QAT_GEN3].get_feature_flags =
			qat_sym_crypto_feature_flags_get_gen1;
	qat_sym_gen_dev_ops[QAT_GEN3].set_session =
			qat_sym_crypto_set_session_gen3;
	qat_sym_gen_dev_ops[QAT_GEN3].set_raw_dp_ctx =
			qat_sym_configure_raw_dp_ctx_gen3;
#ifdef RTE_LIB_SECURITY
	qat_sym_gen_dev_ops[QAT_GEN3].create_security_ctx =
			qat_sym_create_security_gen1;
#endif
}

RTE_INIT(qat_asym_crypto_gen3_init)
{
	qat_asym_gen_dev_ops[QAT_GEN3].cryptodev_ops = NULL;
	qat_asym_gen_dev_ops[QAT_GEN3].get_capabilities = NULL;
	qat_asym_gen_dev_ops[QAT_GEN3].get_feature_flags = NULL;
	qat_asym_gen_dev_ops[QAT_GEN3].set_session = NULL;
}
745