xref: /dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c (revision e9fd1ebf981f361844aea9ec94e17f4bda5e1479)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2022 Intel Corporation
 */

#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include "qat_sym_session.h"
#include "qat_sym.h"
#include "qat_asym.h"
#include "qat_crypto.h"
#include "qat_crypto_pmd_gens.h"

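/*
 * Capabilities advertised only when legacy algorithms are enabled
 * (qat_legacy_capa); see qat_sym_crypto_cap_get_gen3() below.
 */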
static struct rte_cryptodev_capabilities qat_sym_crypto_legacy_caps_gen3[] = {
	QAT_SYM_CIPHER_CAP(3DES_CBC,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(DES_CBC,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(3DES_CTR,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 16, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_PLAIN_AUTH_CAP(SHA1,
		CAP_SET(block_size, 64),
		CAP_RNG(digest_size, 1, 20, 1)),
	QAT_SYM_AUTH_CAP(SHA224,
		CAP_SET(block_size, 64),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA224_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA1_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(MD5_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 16, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_CIPHER_CAP(DES_DOCSISBPI,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 8, 8, 0), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_PLAIN_AUTH_CAP(SHA3_224,
		CAP_SET(block_size, 144),
		CAP_RNG(digest_size, 28, 28, 0)),
	QAT_SYM_CIPHER_CAP(SM4_ECB,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 0, 0, 0))
};

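/*
 * Capabilities always advertised by GEN3 devices. Entries may still be
 * dropped or widened per device in qat_sym_crypto_cap_get_gen3() below,
 * depending on the slice map and on the presence of a wireless slice.
 */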
static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen3[] = {
	QAT_SYM_AEAD_CAP(AES_GCM,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4),
		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 0, 12, 12)),
	QAT_SYM_AEAD_CAP(AES_CCM,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 2),
		CAP_RNG(aad_size, 0, 224, 1), CAP_RNG(iv_size, 7, 13, 1)),
	QAT_SYM_AUTH_CAP(AES_GMAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4),
		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 0, 12, 12)),
	QAT_SYM_AUTH_CAP(AES_CMAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA256,
		CAP_SET(block_size, 64),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA384,
		CAP_SET(block_size, 128),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 48, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA512,
		CAP_SET(block_size, 128),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_PLAIN_AUTH_CAP(SHA3_256,
		CAP_SET(block_size, 136),
		CAP_RNG(digest_size, 32, 32, 0)),
	QAT_SYM_PLAIN_AUTH_CAP(SHA3_384,
		CAP_SET(block_size, 104),
		CAP_RNG(digest_size, 48, 48, 0)),
	QAT_SYM_PLAIN_AUTH_CAP(SHA3_512,
		CAP_SET(block_size, 72),
		CAP_RNG(digest_size, 64, 64, 0)),
	QAT_SYM_AUTH_CAP(SHA256_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA384_HMAC,
		CAP_SET(block_size, 128),
		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 48, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA512_HMAC,
		CAP_SET(block_size, 128),
		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(AES_XCBC_MAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SNOW3G_UIA2,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_AUTH_CAP(KASUMI_F9,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(NULL,
		CAP_SET(block_size, 1),
		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(digest_size),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_CIPHER_CAP(AES_CBC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(AES_CTR,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(AES_XTS,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 32, 64, 32), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(AES_DOCSISBPI,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 16), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(SNOW3G_UEA2,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(KASUMI_F8,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(NULL,
		CAP_SET(block_size, 1),
		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_CIPHER_CAP(ZUC_EEA3,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_AUTH_CAP(ZUC_EIA3,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_AEAD_CAP(CHACHA20_POLY1305,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 32, 32, 0),
		CAP_RNG(digest_size, 16, 16, 0),
		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 12, 12, 0)),
	QAT_SYM_CIPHER_CAP(SM4_CBC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(SM4_CTR,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_PLAIN_AUTH_CAP(SM3,
		CAP_SET(block_size, 64),
		CAP_RNG(digest_size, 32, 32, 0)),
	QAT_SYM_AUTH_CAP(SM3_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 16, 64, 4), CAP_RNG(digest_size, 32, 32, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

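/* Return 1 if @cap describes the symmetric cipher algorithm @algo. */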
static int
check_cipher_capa(const struct rte_cryptodev_capabilities *cap,
		enum rte_crypto_cipher_algorithm algo)
{
	if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
		return 0;
	if (cap->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
		return 0;
	if (cap->sym.cipher.algo != algo)
		return 0;
	return 1;
}

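/* Return 1 if @cap describes the symmetric auth algorithm @algo. */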
static int
check_auth_capa(const struct rte_cryptodev_capabilities *cap,
		enum rte_crypto_auth_algorithm algo)
{
	if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
		return 0;
	if (cap->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
		return 0;
	if (cap->sym.auth.algo != algo)
		return 0;
	return 1;
}

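/*
 * Build the capability array for a GEN3 device in a memzone: copy the
 * static tables above (legacy entries first, when enabled), skip the
 * algorithms this device cannot run, and adjust the ZUC entries when
 * the device has a wireless slice.
 */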
static int
qat_sym_crypto_cap_get_gen3(struct qat_cryptodev_private *internals,
			const char *capa_memz_name, const uint16_t slice_map)
{
	uint32_t i, iter = 0;
	uint32_t curr_capa = 0;
	uint32_t size = sizeof(qat_sym_crypto_caps_gen3);
	uint32_t legacy_size = sizeof(qat_sym_crypto_legacy_caps_gen3);
	uint32_t capa_num = size / sizeof(struct rte_cryptodev_capabilities);
	uint32_t legacy_capa_num =
		legacy_size / sizeof(struct rte_cryptodev_capabilities);
	struct rte_cryptodev_capabilities *cap;

	if (unlikely(qat_legacy_capa))
		size += legacy_size;

	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
	if (internals->capa_mz == NULL) {
		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
				size, rte_socket_id(), 0);
		if (internals->capa_mz == NULL) {
			QAT_LOG(DEBUG,
				"Error allocating memzone for capabilities");
			return -1;
		}
	}

	struct rte_cryptodev_capabilities *addr =
			(struct rte_cryptodev_capabilities *)
				internals->capa_mz->addr;
	struct rte_cryptodev_capabilities *capabilities;

	if (unlikely(qat_legacy_capa)) {
		capabilities = qat_sym_crypto_legacy_caps_gen3;
		capa_num += legacy_capa_num;
	} else {
		capabilities = qat_sym_crypto_caps_gen3;
	}

	for (i = 0; i < capa_num; i++, iter++) {
		if (unlikely(qat_legacy_capa) && (i == legacy_capa_num)) {
			capabilities = qat_sym_crypto_caps_gen3;
			addr += curr_capa;
			curr_capa = 0;
			iter = 0;
		}

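		/*
		 * Skip algorithms whose accelerator slice is flagged in
		 * slice_map (not usable on this device), and KASUMI/DES
		 * on devices that have a wireless slice.
		 */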
		if (slice_map & ICP_ACCEL_MASK_SM4_SLICE && (
			check_cipher_capa(&capabilities[iter],
				RTE_CRYPTO_CIPHER_SM4_ECB) ||
			check_cipher_capa(&capabilities[iter],
				RTE_CRYPTO_CIPHER_SM4_CBC) ||
			check_cipher_capa(&capabilities[iter],
				RTE_CRYPTO_CIPHER_SM4_CTR))) {
			continue;
		}

		if (slice_map & ICP_ACCEL_MASK_SM3_SLICE && (
			check_auth_capa(&capabilities[iter],
				RTE_CRYPTO_AUTH_SM3) ||
			check_auth_capa(&capabilities[iter],
				RTE_CRYPTO_AUTH_SM3_HMAC))) {
			continue;
		}

		if (slice_map & ICP_ACCEL_MASK_ZUC_256_SLICE && (
			check_auth_capa(&capabilities[iter],
				RTE_CRYPTO_AUTH_ZUC_EIA3) ||
			check_cipher_capa(&capabilities[iter],
				RTE_CRYPTO_CIPHER_ZUC_EEA3))) {
			continue;
		}

		if (internals->qat_dev->has_wireless_slice && (
			check_auth_capa(&capabilities[iter],
				RTE_CRYPTO_AUTH_KASUMI_F9) ||
			check_cipher_capa(&capabilities[iter],
				RTE_CRYPTO_CIPHER_KASUMI_F8) ||
			check_cipher_capa(&capabilities[iter],
				RTE_CRYPTO_CIPHER_DES_CBC) ||
			check_cipher_capa(&capabilities[iter],
				RTE_CRYPTO_CIPHER_DES_DOCSISBPI)))
			continue;

		memcpy(addr + curr_capa, capabilities + iter,
			sizeof(struct rte_cryptodev_capabilities));

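		/*
		 * A wireless slice implies ZUC-256 support: widen the
		 * advertised ZUC EIA3/EEA3 key, IV and digest ranges
		 * accordingly.
		 */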
		if (internals->qat_dev->has_wireless_slice && (
			check_auth_capa(&capabilities[iter],
				RTE_CRYPTO_AUTH_ZUC_EIA3))) {
			cap = addr + curr_capa;
			cap->sym.auth.key_size.max = 32;
			cap->sym.auth.key_size.increment = 16;
			cap->sym.auth.iv_size.max = 25;
			cap->sym.auth.iv_size.increment = 1;
			cap->sym.auth.digest_size.max = 16;
			cap->sym.auth.digest_size.increment = 4;
		}
		if (internals->qat_dev->has_wireless_slice && (
			check_cipher_capa(&capabilities[iter],
				RTE_CRYPTO_CIPHER_ZUC_EEA3))) {
			cap = addr + curr_capa;
			cap->sym.cipher.key_size.max = 32;
			cap->sym.cipher.key_size.increment = 16;
			cap->sym.cipher.iv_size.max = 25;
			cap->sym.cipher.iv_size.increment = 1;
		}
		curr_capa++;
	}
	internals->qat_dev_capabilities = internals->capa_mz->addr;

	return 0;
}

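/*
 * GEN3 can run AEAD in a single pass: the whole operation is submitted
 * as one cipher request, with the AAD and digest addresses carried in
 * the cipher request parameters. Sessions that are not single-pass fall
 * back to the GEN1 AEAD path.
 */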
static __rte_always_inline void
enqueue_one_aead_job_gen3(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	if (ctx->is_single_pass) {
		/* QAT GEN3 uses single pass to treat AEAD as
		 * a cipher operation
		 */
		struct icp_qat_fw_la_cipher_req_params *cipher_param =
			(void *)&req->serv_specif_rqpars;

		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
		cipher_param->cipher_offset = ofs.ofs.cipher.head;
		cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
				ofs.ofs.cipher.tail;

		cipher_param->spc_aad_addr = aad->iova;
		cipher_param->spc_auth_res_addr = digest->iova;

		return;
	}

	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
}

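/*
 * Single-pass GMAC: for AES-GMAC sessions whose data fits within
 * QAT_AES_GMAC_SPC_MAX_SIZE, build a per-operation cipher content
 * descriptor in the op cookie and submit the job as a single-pass
 * cipher request. Larger jobs and non-GMAC sessions use the GEN1
 * auth path.
 */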
static __rte_always_inline void
enqueue_one_auth_job_gen3(struct qat_sym_session *ctx,
	struct qat_sym_op_cookie *cookie,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl;
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	uint32_t ver_key_offset;
	uint32_t auth_data_len = data_len - ofs.ofs.auth.head -
			ofs.ofs.auth.tail;

	if (!ctx->is_single_pass_gmac ||
			(auth_data_len > QAT_AES_GMAC_SPC_MAX_SIZE)) {
		enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs,
				data_len);
		return;
	}

	cipher_cd_ctrl = (void *)&req->cd_ctrl;
	cipher_param = (void *)&req->serv_specif_rqpars;
	ver_key_offset = sizeof(struct icp_qat_hw_auth_setup) +
			ICP_QAT_HW_GALOIS_128_STATE1_SZ +
			ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
			ICP_QAT_HW_GALOIS_E_CTR0_SZ +
			sizeof(struct icp_qat_hw_cipher_config);

	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
		/* AES-GMAC */
		qat_set_cipher_iv(cipher_param, auth_iv, ctx->auth_iv.length,
				req);
	}

	/* Fill separate Content Descriptor for this op */
	rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
			ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
				ctx->cd.cipher.key :
				RTE_PTR_ADD(&ctx->cd, ver_key_offset),
			ctx->auth_key_length);
	cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
				ICP_QAT_HW_CIPHER_AEAD_MODE,
				ctx->qat_cipher_alg,
				ICP_QAT_HW_CIPHER_NO_CONVERT,
				(ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
					ICP_QAT_HW_CIPHER_ENCRYPT :
					ICP_QAT_HW_CIPHER_DECRYPT));
	QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
			ctx->digest_length,
			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
	cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(auth_data_len);

	/* Update the request */
	req->cd_pars.u.s.content_desc_addr =
			cookie->opt.spc_gmac.cd_phys_addr;
	req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
			sizeof(struct icp_qat_hw_cipher_config) +
			ctx->auth_key_length, 8) >> 3;
	req->comn_mid.src_length = data_len;
	req->comn_mid.dst_length = 0;

	cipher_param->spc_aad_addr = 0;
	cipher_param->spc_auth_res_addr = digest->iova;
	cipher_param->spc_aad_sz = auth_data_len;
	cipher_param->reserved = 0;
	cipher_param->spc_auth_res_sz = ctx->digest_length;

	req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
	cipher_cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
			req->comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
	ICP_QAT_FW_LA_PROTO_SET(
			req->comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_NO_PROTO);
}

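/* Build an AEAD request from an rte_crypto_op using the GEN3 helpers. */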
static int
qat_sym_build_op_aead_gen3(void *in_op, struct qat_sym_session *ctx,
		uint8_t *out_msg, void *op_cookie)
{
	register struct icp_qat_fw_la_bulk_req *req;
	struct rte_crypto_op *op = in_op;
	struct qat_sym_op_cookie *cookie = op_cookie;
	struct rte_crypto_sgl in_sgl, out_sgl;
	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
			out_vec[QAT_SYM_SGL_MAX_NUMBER];
	struct rte_crypto_va_iova_ptr cipher_iv;
	struct rte_crypto_va_iova_ptr aad;
	struct rte_crypto_va_iova_ptr digest;
	union rte_crypto_sym_ofs ofs;
	int32_t total_len;

	in_sgl.vec = in_vec;
	out_sgl.vec = out_vec;

	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
			&cipher_iv, &aad, &digest);
	if (unlikely(ofs.raw == UINT64_MAX)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
	if (unlikely(total_len < 0)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	enqueue_one_aead_job_gen3(ctx, req, &cipher_iv, &digest, &aad, ofs,
		total_len);

	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
			NULL, &aad, &digest);

	return 0;
}

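/* Build an auth request from an rte_crypto_op using the GEN3 helpers. */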
static int
qat_sym_build_op_auth_gen3(void *in_op, struct qat_sym_session *ctx,
		uint8_t *out_msg, void *op_cookie)
{
	register struct icp_qat_fw_la_bulk_req *req;
	struct rte_crypto_op *op = in_op;
	struct qat_sym_op_cookie *cookie = op_cookie;
	struct rte_crypto_sgl in_sgl, out_sgl;
	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
			out_vec[QAT_SYM_SGL_MAX_NUMBER];
	struct rte_crypto_va_iova_ptr auth_iv;
	struct rte_crypto_va_iova_ptr digest;
	union rte_crypto_sym_ofs ofs;
	int32_t total_len;

	in_sgl.vec = in_vec;
	out_sgl.vec = out_vec;

	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

	ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
			NULL, &auth_iv, &digest, op_cookie);
	if (unlikely(ofs.raw == UINT64_MAX)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
	if (unlikely(total_len < 0)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	enqueue_one_auth_job_gen3(ctx, cookie, req, &digest, &auth_iv,
			ofs, total_len);

	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
			&auth_iv, NULL, &digest);

	return 0;
}

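/*
 * Session setup: reuse the GEN1 session build, then install the GEN3
 * single-pass request builders where applicable. Mixed hash/cipher
 * combinations that GEN1 rejects with -ENOTSUP are enabled here by
 * setting the extended hash flags.
 */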
static int
qat_sym_crypto_set_session_gen3(void *cdev, void *session)
{
	struct qat_sym_session *ctx = session;
	enum rte_proc_type_t proc_type = rte_eal_process_type();
	int ret;
	struct qat_cryptodev_private *internals;

	internals = ((struct rte_cryptodev *)cdev)->data->dev_private;

	if (proc_type == RTE_PROC_AUTO || proc_type == RTE_PROC_INVALID)
		return -EINVAL;

	ret = qat_sym_crypto_set_session_gen1(cdev, session);
	/* special single pass build request for GEN3 */
	if (ctx->is_single_pass)
		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen3;
	else if (ctx->is_single_pass_gmac)
		ctx->build_request[proc_type] = qat_sym_build_op_auth_gen3;

	if (ret == -ENOTSUP) {
		/* GEN1 returns -ENOTSUP for mixed-algorithm combinations it
		 * cannot handle; GEN3 supports them, so set the extended hash
		 * flags and accept the session here.
		 */
		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
				ctx->qat_cipher_alg !=
				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
			qat_sym_session_set_ext_hash_flags_gen2(ctx,
				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
				ctx->qat_cipher_alg !=
				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
			qat_sym_session_set_ext_hash_flags_gen2(ctx,
				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
		} else if ((ctx->aes_cmac ||
				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
				(ctx->qat_cipher_alg ==
				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
				ctx->qat_cipher_alg ==
				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
		} else if ((internals->qat_dev->has_wireless_slice) &&
				((ctx->aes_cmac ||
				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
				(ctx->qat_cipher_alg ==
				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
				ctx->qat_cipher_alg ==
				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 ||
				ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_ZUC_256))) {
			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
		} else if ((internals->qat_dev->has_wireless_slice) &&
				(ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_32 ||
				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_64 ||
				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_128) &&
				ctx->qat_cipher_alg != ICP_QAT_HW_CIPHER_ALGO_ZUC_256) {
			qat_sym_session_set_ext_hash_flags_gen2(ctx,
					1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
		}

		ret = 0;
	}

	return ret;
}

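/*
 * Raw data-path API (rte_cryptodev_raw_*) enqueue helpers. These mirror
 * the op-based builders above but write requests directly into the TX
 * ring at the cached tail position.
 */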
static int
qat_sym_dp_enqueue_single_aead_gen3(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_op_cookie *cookie;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	cookie = qp->op_cookies[tail >> tx_queue->trailz];
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
			data, n_data_vecs, NULL, 0);
	if (unlikely(data_len < 0))
		return -1;

	enqueue_one_aead_job_gen3(ctx, req, iv, digest, aad, ofs,
		(uint32_t)data_len);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
			NULL, aad, digest);

	return 0;
}

static uint32_t
qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		struct qat_sym_op_cookie *cookie =
			qp->op_cookies[tail >> tx_queue->trailz];

		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		if (vec->dest_sgl) {
			data_len = qat_sym_build_req_set_data(req,
				user_data[i], cookie,
				vec->src_sgl[i].vec, vec->src_sgl[i].num,
				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
		} else {
			data_len = qat_sym_build_req_set_data(req,
				user_data[i], cookie,
				vec->src_sgl[i].vec,
				vec->src_sgl[i].num, NULL, 0);
		}

		if (unlikely(data_len < 0))
			break;

		enqueue_one_aead_job_gen3(ctx, req, &vec->iv[i],
				&vec->digest[i], &vec->aad[i], ofs,
				(uint32_t)data_len);

		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;

		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
				vec->src_sgl[i].num, &vec->iv[i], NULL,
				&vec->aad[i], &vec->digest[i]);
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}

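/*
 * Auth enqueue helpers. For NULL-auth sessions the digest pointer is
 * redirected to the cookie's pre-allocated null-digest buffer before
 * the job is built.
 */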
static int
qat_sym_dp_enqueue_single_auth_gen3(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv __rte_unused,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_op_cookie *cookie;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;
	struct rte_crypto_va_iova_ptr null_digest;
	struct rte_crypto_va_iova_ptr *job_digest = digest;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	cookie = qp->op_cookies[tail >> tx_queue->trailz];
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;

	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
			data, n_data_vecs, NULL, 0);
	if (unlikely(data_len < 0))
		return -1;

	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) {
		null_digest.iova = cookie->digest_null_phys_addr;
		job_digest = &null_digest;
	}

	enqueue_one_auth_job_gen3(ctx, cookie, req, job_digest, auth_iv, ofs,
			(uint32_t)data_len);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	return 0;
}

static uint32_t
qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	struct rte_crypto_va_iova_ptr null_digest;
	struct rte_crypto_va_iova_ptr *job_digest = NULL;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		struct qat_sym_op_cookie *cookie =
			qp->op_cookies[tail >> tx_queue->trailz];

		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		if (vec->dest_sgl) {
			data_len = qat_sym_build_req_set_data(req,
				user_data[i], cookie,
				vec->src_sgl[i].vec, vec->src_sgl[i].num,
				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
		} else {
			data_len = qat_sym_build_req_set_data(req,
				user_data[i], cookie,
				vec->src_sgl[i].vec,
				vec->src_sgl[i].num, NULL, 0);
		}

		if (unlikely(data_len < 0))
			break;

		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) {
			null_digest.iova = cookie->digest_null_phys_addr;
			job_digest = &null_digest;
		} else {
			job_digest = &vec->digest[i];
		}

		enqueue_one_auth_job_gen3(ctx, cookie, req, job_digest,
			&vec->auth_iv[i], ofs, (uint32_t)data_len);
		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}

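/*
 * Raw data-path context setup: start from the GEN1 configuration and
 * override the enqueue callbacks for single-pass AEAD and single-pass
 * GMAC sessions.
 */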
static int
qat_sym_configure_raw_dp_ctx_gen3(void *_raw_dp_ctx, void *_ctx)
{
	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
	struct qat_sym_session *ctx = _ctx;
	int ret;

	ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
	if (ret < 0)
		return ret;

	if (ctx->is_single_pass) {
		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen3;
		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen3;
	} else if (ctx->is_single_pass_gmac) {
		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen3;
		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen3;
	}

	return 0;
}

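/*
 * Register the GEN3 symmetric and asymmetric ops at constructor time;
 * anything without a GEN3-specific override reuses the GEN1
 * implementation.
 */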
RTE_INIT(qat_sym_crypto_gen3_init)
{
	qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1;
	qat_sym_gen_dev_ops[QAT_GEN3].get_capabilities =
			qat_sym_crypto_cap_get_gen3;
	qat_sym_gen_dev_ops[QAT_GEN3].get_feature_flags =
			qat_sym_crypto_feature_flags_get_gen1;
	qat_sym_gen_dev_ops[QAT_GEN3].set_session =
			qat_sym_crypto_set_session_gen3;
	qat_sym_gen_dev_ops[QAT_GEN3].set_raw_dp_ctx =
			qat_sym_configure_raw_dp_ctx_gen3;
	qat_sym_gen_dev_ops[QAT_GEN3].create_security_ctx =
			qat_sym_create_security_gen1;
}

RTE_INIT(qat_asym_crypto_gen3_init)
{
	qat_asym_gen_dev_ops[QAT_GEN3].cryptodev_ops =
			&qat_asym_crypto_ops_gen1;
	qat_asym_gen_dev_ops[QAT_GEN3].get_capabilities =
			qat_asym_crypto_cap_get_gen1;
	qat_asym_gen_dev_ops[QAT_GEN3].get_feature_flags =
			qat_asym_crypto_feature_flags_get_gen1;
	qat_asym_gen_dev_ops[QAT_GEN3].set_session =
			qat_asym_crypto_set_session_gen1;
}