xref: /dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c (revision b7bd72d8da9c13deba44b1ac9f7dfa8cda77f240)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2022 Intel Corporation
 */

#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include "qat_sym_session.h"
#include "qat_sym.h"
#include "qat_asym.h"
#include "qat_crypto.h"
#include "qat_crypto_pmd_gens.h"

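/*
 * Capabilities exposed only when legacy algorithms are enabled for the
 * device (see the options.legacy_alg checks in the capability getter
 * below). These cover older or deprecated algorithms such as DES/3DES,
 * MD5-HMAC, SHA-1 and SM4-ECB.
 */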
static struct rte_cryptodev_capabilities qat_sym_crypto_legacy_caps_gen3[] = {
	QAT_SYM_CIPHER_CAP(3DES_CBC,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(DES_CBC,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(3DES_CTR,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 16, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_PLAIN_AUTH_CAP(SHA1,
		CAP_SET(block_size, 64),
		CAP_RNG(digest_size, 1, 20, 1)),
	QAT_SYM_AUTH_CAP(SHA224,
		CAP_SET(block_size, 64),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA224_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA1_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(MD5_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 16, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_CIPHER_CAP(DES_DOCSISBPI,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 8, 8, 0), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_PLAIN_AUTH_CAP(SHA3_224,
		CAP_SET(block_size, 144),
		CAP_RNG(digest_size, 28, 28, 0)),
	QAT_SYM_CIPHER_CAP(SM4_ECB,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 0, 0, 0))
};

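/*
 * Baseline GEN3 symmetric capabilities, always compiled in. Individual
 * entries may still be filtered out at runtime by
 * qat_sym_crypto_cap_get_gen3() depending on the device slice map.
 */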
static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen3[] = {
	QAT_SYM_AEAD_CAP(AES_GCM,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4),
		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 0, 12, 12)),
	QAT_SYM_AEAD_CAP(AES_CCM,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 2),
		CAP_RNG(aad_size, 0, 224, 1), CAP_RNG(iv_size, 7, 13, 1)),
	QAT_SYM_AUTH_CAP(AES_GMAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4),
		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 0, 12, 12)),
	QAT_SYM_AUTH_CAP(AES_CMAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA256,
		CAP_SET(block_size, 64),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA384,
		CAP_SET(block_size, 128),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 48, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA512,
		CAP_SET(block_size, 128),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_PLAIN_AUTH_CAP(SHA3_256,
		CAP_SET(block_size, 136),
		CAP_RNG(digest_size, 32, 32, 0)),
	QAT_SYM_PLAIN_AUTH_CAP(SHA3_384,
		CAP_SET(block_size, 104),
		CAP_RNG(digest_size, 48, 48, 0)),
	QAT_SYM_PLAIN_AUTH_CAP(SHA3_512,
		CAP_SET(block_size, 72),
		CAP_RNG(digest_size, 64, 64, 0)),
	QAT_SYM_AUTH_CAP(SHA256_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA384_HMAC,
		CAP_SET(block_size, 128),
		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 48, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA512_HMAC,
		CAP_SET(block_size, 128),
		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(AES_XCBC_MAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SNOW3G_UIA2,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_AUTH_CAP(KASUMI_F9,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(NULL,
		CAP_SET(block_size, 1),
		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(digest_size),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_CIPHER_CAP(AES_CBC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(AES_CTR,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(AES_XTS,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 32, 64, 32), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(AES_DOCSISBPI,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 16), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(SNOW3G_UEA2,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(KASUMI_F8,
		CAP_SET(block_size, 8),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 8, 8, 0)),
	QAT_SYM_CIPHER_CAP(NULL,
		CAP_SET(block_size, 1),
		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_CIPHER_CAP(ZUC_EEA3,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_AUTH_CAP(ZUC_EIA3,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_AEAD_CAP(CHACHA20_POLY1305,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 32, 32, 0),
		CAP_RNG(digest_size, 16, 16, 0),
		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 12, 12, 0)),
	QAT_SYM_CIPHER_CAP(SM4_CBC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(SM4_CTR,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_PLAIN_AUTH_CAP(SM3,
		CAP_SET(block_size, 64),
		CAP_RNG(digest_size, 32, 32, 0)),
	QAT_SYM_AUTH_CAP(SM3_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 16, 64, 4), CAP_RNG(digest_size, 32, 32, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

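/* Return 1 if @cap is a symmetric cipher capability matching @algo. */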
static int
check_cipher_capa(const struct rte_cryptodev_capabilities *cap,
		enum rte_crypto_cipher_algorithm algo)
{
	if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
		return 0;
	if (cap->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
		return 0;
	if (cap->sym.cipher.algo != algo)
		return 0;
	return 1;
}

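/* Return 1 if @cap is a symmetric auth capability matching @algo. */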
static int
check_auth_capa(const struct rte_cryptodev_capabilities *cap,
		enum rte_crypto_auth_algorithm algo)
{
	if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
		return 0;
	if (cap->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
		return 0;
	if (cap->sym.auth.algo != algo)
		return 0;
	return 1;
}

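/*
 * Build the device capability array in a memzone, walking the legacy
 * table first (when legacy algorithms are enabled) and then the
 * baseline table. SM4, SM3 and ZUC-256 entries are skipped when the
 * corresponding slice bit is set in @slice_map; KASUMI and DES entries
 * are skipped on devices fitted with a wireless slice. On such devices
 * the copied ZUC EEA3/EIA3 ranges are also widened in place to cover
 * ZUC-256 key, IV and digest sizes.
 */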
static int
qat_sym_crypto_cap_get_gen3(struct qat_cryptodev_private *internals,
			const char *capa_memz_name, const uint16_t slice_map)
{
	uint32_t i, iter = 0;
	uint32_t curr_capa = 0;
	uint32_t capa_num, legacy_capa_num;
	uint32_t size = sizeof(qat_sym_crypto_caps_gen3);
	uint32_t legacy_size = sizeof(qat_sym_crypto_legacy_caps_gen3);
	struct rte_cryptodev_capabilities *cap;

	capa_num = size/sizeof(struct rte_cryptodev_capabilities);
	legacy_capa_num = legacy_size/sizeof(struct rte_cryptodev_capabilities);

	if (unlikely(internals->qat_dev->options.legacy_alg))
		size = size + legacy_size;

	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
	if (internals->capa_mz == NULL) {
		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
				size, rte_socket_id(), 0);
		if (internals->capa_mz == NULL) {
			QAT_LOG(DEBUG,
				"Error allocating memzone for capabilities");
			return -1;
		}
	}

	struct rte_cryptodev_capabilities *addr =
			(struct rte_cryptodev_capabilities *)
				internals->capa_mz->addr;
	struct rte_cryptodev_capabilities *capabilities;

	if (unlikely(internals->qat_dev->options.legacy_alg)) {
		capabilities = qat_sym_crypto_legacy_caps_gen3;
		capa_num += legacy_capa_num;
	} else {
		capabilities = qat_sym_crypto_caps_gen3;
	}

	for (i = 0; i < capa_num; i++, iter++) {
		/* Switch from the legacy table to the baseline table once
		 * all legacy entries have been processed.
		 */
		if (unlikely(internals->qat_dev->options.legacy_alg) &&
				(i == legacy_capa_num)) {
			capabilities = qat_sym_crypto_caps_gen3;
			addr += curr_capa;
			curr_capa = 0;
			iter = 0;
		}

		if (slice_map & ICP_ACCEL_MASK_SM4_SLICE && (
			check_cipher_capa(&capabilities[iter],
				RTE_CRYPTO_CIPHER_SM4_ECB) ||
			check_cipher_capa(&capabilities[iter],
				RTE_CRYPTO_CIPHER_SM4_CBC) ||
			check_cipher_capa(&capabilities[iter],
				RTE_CRYPTO_CIPHER_SM4_CTR))) {
			continue;
		}

		if (slice_map & ICP_ACCEL_MASK_SM3_SLICE && (
			check_auth_capa(&capabilities[iter],
				RTE_CRYPTO_AUTH_SM3) ||
			check_auth_capa(&capabilities[iter],
				RTE_CRYPTO_AUTH_SM3_HMAC))) {
			continue;
		}

		if (slice_map & ICP_ACCEL_MASK_ZUC_256_SLICE && (
			check_auth_capa(&capabilities[iter],
				RTE_CRYPTO_AUTH_ZUC_EIA3) ||
			check_cipher_capa(&capabilities[iter],
				RTE_CRYPTO_CIPHER_ZUC_EEA3))) {
			continue;
		}

		if (internals->qat_dev->options.has_wireless_slice && (
			check_auth_capa(&capabilities[iter],
				RTE_CRYPTO_AUTH_KASUMI_F9) ||
			check_cipher_capa(&capabilities[iter],
				RTE_CRYPTO_CIPHER_KASUMI_F8) ||
			check_cipher_capa(&capabilities[iter],
				RTE_CRYPTO_CIPHER_DES_CBC) ||
			check_cipher_capa(&capabilities[iter],
				RTE_CRYPTO_CIPHER_DES_DOCSISBPI)))
			continue;

		memcpy(addr + curr_capa, capabilities + iter,
			sizeof(struct rte_cryptodev_capabilities));

		/* Widen the copied ZUC ranges to ZUC-256 sizes on devices
		 * with a wireless slice.
		 */
		if (internals->qat_dev->options.has_wireless_slice && (
			check_auth_capa(&capabilities[iter],
				RTE_CRYPTO_AUTH_ZUC_EIA3))) {
			cap = addr + curr_capa;
			cap->sym.auth.key_size.max = 32;
			cap->sym.auth.key_size.increment = 16;
			cap->sym.auth.iv_size.max = 25;
			cap->sym.auth.iv_size.increment = 1;
			cap->sym.auth.digest_size.max = 16;
			cap->sym.auth.digest_size.increment = 4;
		}
		if (internals->qat_dev->options.has_wireless_slice && (
			check_cipher_capa(&capabilities[iter],
				RTE_CRYPTO_CIPHER_ZUC_EEA3))) {
			cap = addr + curr_capa;
			cap->sym.cipher.key_size.max = 32;
			cap->sym.cipher.key_size.increment = 16;
			cap->sym.cipher.iv_size.max = 25;
			cap->sym.cipher.iv_size.increment = 1;
		}
		curr_capa++;
	}
	internals->qat_dev_capabilities = internals->capa_mz->addr;

	return 0;
}

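/*
 * GEN3 runs single-pass AEAD as one cipher operation: the AAD and
 * digest addresses ride in the cipher request parameters, so no
 * separate auth pass is needed. Sessions that are not single-pass fall
 * back to the GEN1 path.
 */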
static __rte_always_inline void
enqueue_one_aead_job_gen3(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	if (ctx->is_single_pass) {
		/* QAT GEN3 uses single pass to treat AEAD as
		 * a cipher operation
		 */
		struct icp_qat_fw_la_cipher_req_params *cipher_param =
			(void *)&req->serv_specif_rqpars;

		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
		cipher_param->cipher_offset = ofs.ofs.cipher.head;
		cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
				ofs.ofs.cipher.tail;

		cipher_param->spc_aad_addr = aad->iova;
		cipher_param->spc_auth_res_addr = digest->iova;

		return;
	}

	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
}

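/*
 * Single-pass AES-GMAC: for hash-only GMAC sessions within the
 * QAT_AES_GMAC_SPC_MAX_SIZE limit, build a per-op cipher content
 * descriptor in the cookie and issue the request as an AEAD-mode
 * cipher command. Larger requests, or non-GMAC sessions, take the
 * GEN1 auth path.
 */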
static __rte_always_inline void
enqueue_one_auth_job_gen3(struct qat_sym_session *ctx,
	struct qat_sym_op_cookie *cookie,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl;
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	uint32_t ver_key_offset;
	uint32_t auth_data_len = data_len - ofs.ofs.auth.head -
			ofs.ofs.auth.tail;

	if (!ctx->is_single_pass_gmac ||
			(auth_data_len > QAT_AES_GMAC_SPC_MAX_SIZE)) {
		enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs,
				data_len);
		return;
	}

	cipher_cd_ctrl = (void *)&req->cd_ctrl;
	cipher_param = (void *)&req->serv_specif_rqpars;
	ver_key_offset = sizeof(struct icp_qat_hw_auth_setup) +
			ICP_QAT_HW_GALOIS_128_STATE1_SZ +
			ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
			ICP_QAT_HW_GALOIS_E_CTR0_SZ +
			sizeof(struct icp_qat_hw_cipher_config);

	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
		/* AES-GMAC */
		qat_set_cipher_iv(cipher_param, auth_iv, ctx->auth_iv.length,
				req);
	}

	/* Fill separate Content Descriptor for this op */
	rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
			ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
				ctx->cd.cipher.key :
				RTE_PTR_ADD(&ctx->cd, ver_key_offset),
			ctx->auth_key_length);
	cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
				ICP_QAT_HW_CIPHER_AEAD_MODE,
				ctx->qat_cipher_alg,
				ICP_QAT_HW_CIPHER_NO_CONVERT,
				(ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
					ICP_QAT_HW_CIPHER_ENCRYPT :
					ICP_QAT_HW_CIPHER_DECRYPT));
	QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
			ctx->digest_length,
			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
	cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(auth_data_len);

	/* Update the request */
	req->cd_pars.u.s.content_desc_addr =
			cookie->opt.spc_gmac.cd_phys_addr;
	req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
			sizeof(struct icp_qat_hw_cipher_config) +
			ctx->auth_key_length, 8) >> 3;
	req->comn_mid.src_length = data_len;
	req->comn_mid.dst_length = 0;

	cipher_param->spc_aad_addr = 0;
	cipher_param->spc_auth_res_addr = digest->iova;
	cipher_param->spc_aad_sz = auth_data_len;
	cipher_param->reserved = 0;
	cipher_param->spc_auth_res_sz = ctx->digest_length;

	req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
	cipher_cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
			req->comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
	ICP_QAT_FW_LA_PROTO_SET(
			req->comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_NO_PROTO);
}

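/*
 * Per-op request builder for AEAD sessions on the crypto-op enqueue
 * path: converts the crypto op into SGL vectors, copies the session
 * request template into the ring slot and hands off to the GEN3 AEAD
 * job writer above.
 */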
static int
qat_sym_build_op_aead_gen3(void *in_op, struct qat_sym_session *ctx,
		uint8_t *out_msg, void *op_cookie)
{
	register struct icp_qat_fw_la_bulk_req *req;
	struct rte_crypto_op *op = in_op;
	struct qat_sym_op_cookie *cookie = op_cookie;
	struct rte_crypto_sgl in_sgl, out_sgl;
	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
			out_vec[QAT_SYM_SGL_MAX_NUMBER];
	struct rte_crypto_va_iova_ptr cipher_iv;
	struct rte_crypto_va_iova_ptr aad;
	struct rte_crypto_va_iova_ptr digest;
	union rte_crypto_sym_ofs ofs;
	int32_t total_len;

	in_sgl.vec = in_vec;
	out_sgl.vec = out_vec;

	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
			&cipher_iv, &aad, &digest);
	if (unlikely(ofs.raw == UINT64_MAX)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
	if (unlikely(total_len < 0)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	enqueue_one_aead_job_gen3(ctx, req, &cipher_iv, &digest, &aad, ofs,
		total_len);

	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
			NULL, &aad, &digest);

	return 0;
}

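/*
 * Per-op request builder for auth-only sessions; installed on GEN3 for
 * sessions that can use single-pass GMAC.
 */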
static int
qat_sym_build_op_auth_gen3(void *in_op, struct qat_sym_session *ctx,
		uint8_t *out_msg, void *op_cookie)
{
	register struct icp_qat_fw_la_bulk_req *req;
	struct rte_crypto_op *op = in_op;
	struct qat_sym_op_cookie *cookie = op_cookie;
	struct rte_crypto_sgl in_sgl, out_sgl;
	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
			out_vec[QAT_SYM_SGL_MAX_NUMBER];
	struct rte_crypto_va_iova_ptr auth_iv;
	struct rte_crypto_va_iova_ptr digest;
	union rte_crypto_sym_ofs ofs;
	int32_t total_len;

	in_sgl.vec = in_vec;
	out_sgl.vec = out_vec;

	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

	ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
			NULL, &auth_iv, &digest, op_cookie);
	if (unlikely(ofs.raw == UINT64_MAX)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
	if (unlikely(total_len < 0)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	enqueue_one_auth_job_gen3(ctx, cookie, req, &digest, &auth_iv,
			ofs, total_len);

	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
			&auth_iv, NULL, &digest);

	return 0;
}

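/*
 * GEN3 session setup: start from the GEN1 session, then override the
 * build-request handler for single-pass AEAD/GMAC sessions. Mixed
 * cipher/hash combinations that GEN1 rejects with -ENOTSUP (for
 * example a ZUC or SNOW3G hash paired with a different cipher) are
 * accepted here by setting the extended hash flags.
 */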
static int
qat_sym_crypto_set_session_gen3(void *cdev, void *session)
{
	struct qat_sym_session *ctx = session;
	enum rte_proc_type_t proc_type = rte_eal_process_type();
	int ret;
	struct qat_cryptodev_private *internals;

	internals = ((struct rte_cryptodev *)cdev)->data->dev_private;

	if (proc_type == RTE_PROC_AUTO || proc_type == RTE_PROC_INVALID)
		return -EINVAL;

	ret = qat_sym_crypto_set_session_gen1(cdev, session);
	/* special single pass build request for GEN3 */
	if (ctx->is_single_pass)
		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen3;
	else if (ctx->is_single_pass_gmac)
		ctx->build_request[proc_type] = qat_sym_build_op_auth_gen3;

	if (ret == -ENOTSUP) {
		/* GEN1 returns -ENOTSUP for some mixed algorithm
		 * combinations; these are addressed here for GEN3.
		 */
		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
				ctx->qat_cipher_alg !=
				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
			qat_sym_session_set_ext_hash_flags_gen2(ctx,
				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
				ctx->qat_cipher_alg !=
				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
			qat_sym_session_set_ext_hash_flags_gen2(ctx,
				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
		} else if ((ctx->aes_cmac ||
				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
				(ctx->qat_cipher_alg ==
				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
				ctx->qat_cipher_alg ==
				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
		} else if ((internals->qat_dev->options.has_wireless_slice) &&
				((ctx->aes_cmac ||
				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
				(ctx->qat_cipher_alg ==
				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
				ctx->qat_cipher_alg ==
				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 ||
				ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_ZUC_256))) {
			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
		} else if ((internals->qat_dev->options.has_wireless_slice) &&
			(ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_32 ||
				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_64 ||
				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_128) &&
				ctx->qat_cipher_alg != ICP_QAT_HW_CIPHER_ALGO_ZUC_256) {
			qat_sym_session_set_ext_hash_flags_gen2(ctx,
					1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
		}

		ret = 0;
	}

	return ret;
}

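/*
 * Raw data-path API: enqueue a single AEAD job using the GEN3
 * single-pass writer.
 */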
static int
qat_sym_dp_enqueue_single_aead_gen3(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_op_cookie *cookie;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	cookie = qp->op_cookies[tail >> tx_queue->trailz];
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
			data, n_data_vecs, NULL, 0);
	if (unlikely(data_len < 0))
		return -1;

	enqueue_one_aead_job_gen3(ctx, req, iv, digest, aad, ofs,
		(uint32_t)data_len);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
			NULL, aad, digest);

	return 0;
}

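/* Raw data-path API: enqueue a burst of AEAD jobs. */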
static uint32_t
qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		struct qat_sym_op_cookie *cookie =
			qp->op_cookies[tail >> tx_queue->trailz];

		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		if (vec->dest_sgl) {
			data_len = qat_sym_build_req_set_data(req,
				user_data[i], cookie,
				vec->src_sgl[i].vec, vec->src_sgl[i].num,
				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
		} else {
			data_len = qat_sym_build_req_set_data(req,
				user_data[i], cookie,
				vec->src_sgl[i].vec,
				vec->src_sgl[i].num, NULL, 0);
		}

		if (unlikely(data_len < 0))
			break;

		enqueue_one_aead_job_gen3(ctx, req, &vec->iv[i],
				&vec->digest[i], &vec->aad[i], ofs,
				(uint32_t)data_len);

		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;

		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
				vec->src_sgl[i].num, &vec->iv[i], NULL,
				&vec->aad[i], &vec->digest[i]);
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}

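/*
 * Raw data-path API: enqueue a single auth job. For NULL-auth sessions
 * the digest is redirected to the cookie's null-digest scratch
 * address.
 */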
static int
qat_sym_dp_enqueue_single_auth_gen3(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv __rte_unused,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_op_cookie *cookie;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;
	struct rte_crypto_va_iova_ptr null_digest;
	struct rte_crypto_va_iova_ptr *job_digest = digest;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	cookie = qp->op_cookies[tail >> tx_queue->trailz];
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;

	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
			data, n_data_vecs, NULL, 0);
	if (unlikely(data_len < 0))
		return -1;

	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) {
		null_digest.iova = cookie->digest_null_phys_addr;
		job_digest = &null_digest;
	}

	enqueue_one_auth_job_gen3(ctx, cookie, req, job_digest, auth_iv, ofs,
			(uint32_t)data_len);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	return 0;
}

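/* Raw data-path API: enqueue a burst of auth jobs. */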
static uint32_t
qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	struct rte_crypto_va_iova_ptr null_digest;
	struct rte_crypto_va_iova_ptr *job_digest = NULL;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		struct qat_sym_op_cookie *cookie =
			qp->op_cookies[tail >> tx_queue->trailz];

		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		if (vec->dest_sgl) {
			data_len = qat_sym_build_req_set_data(req,
				user_data[i], cookie,
				vec->src_sgl[i].vec, vec->src_sgl[i].num,
				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
		} else {
			data_len = qat_sym_build_req_set_data(req,
				user_data[i], cookie,
				vec->src_sgl[i].vec,
				vec->src_sgl[i].num, NULL, 0);
		}

		if (unlikely(data_len < 0))
			break;

		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) {
			null_digest.iova = cookie->digest_null_phys_addr;
			job_digest = &null_digest;
		} else
			job_digest = &vec->digest[i];

		enqueue_one_auth_job_gen3(ctx, cookie, req, job_digest,
			&vec->auth_iv[i], ofs, (uint32_t)data_len);
		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}

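/*
 * Hook the GEN3 single-pass enqueue functions into the raw data-path
 * context; everything else (dequeue, non-single-pass enqueue) stays on
 * the GEN1 implementation.
 */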
static int
qat_sym_configure_raw_dp_ctx_gen3(void *_raw_dp_ctx, void *_ctx)
{
	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
	struct qat_sym_session *ctx = _ctx;
	int ret;

	ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
	if (ret < 0)
		return ret;

	if (ctx->is_single_pass) {
		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen3;
		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen3;
	} else if (ctx->is_single_pass_gmac) {
		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen3;
		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen3;
	}

	return 0;
}

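/*
 * Illustrative sketch (not part of this driver): an application binds
 * a queue pair to the raw data-path API via the generic cryptodev
 * calls, assuming dev_id/qp_id name a configured QAT device and sess
 * is an initialised symmetric session:
 *
 *	struct rte_crypto_raw_dp_ctx *dp_ctx;
 *	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
 *	int sz = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *
 *	dp_ctx = rte_zmalloc(NULL, sz, 0);
 *	if (rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, dp_ctx,
 *			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0) == 0) {
 *		// dp_ctx->enqueue / dp_ctx->enqueue_burst now point at
 *		// the GEN3 handlers installed above for single-pass
 *		// sessions.
 *	}
 */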
/* Constructor-time registration of the GEN3 symmetric entry points;
 * the cryptodev ops, feature flags and security context reuse the
 * GEN1 implementations.
 */
RTE_INIT(qat_sym_crypto_gen3_init)
{
	qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1;
	qat_sym_gen_dev_ops[QAT_GEN3].get_capabilities =
			qat_sym_crypto_cap_get_gen3;
	qat_sym_gen_dev_ops[QAT_GEN3].get_feature_flags =
			qat_sym_crypto_feature_flags_get_gen1;
	qat_sym_gen_dev_ops[QAT_GEN3].set_session =
			qat_sym_crypto_set_session_gen3;
	qat_sym_gen_dev_ops[QAT_GEN3].set_raw_dp_ctx =
			qat_sym_configure_raw_dp_ctx_gen3;
	qat_sym_gen_dev_ops[QAT_GEN3].create_security_ctx =
			qat_sym_create_security_gen1;
}

/* Asymmetric crypto on GEN3 reuses the GEN1 implementation entirely. */
RTE_INIT(qat_asym_crypto_gen3_init)
{
	qat_asym_gen_dev_ops[QAT_GEN3].cryptodev_ops =
			&qat_asym_crypto_ops_gen1;
	qat_asym_gen_dev_ops[QAT_GEN3].get_capabilities =
			qat_asym_crypto_cap_get_gen1;
	qat_asym_gen_dev_ops[QAT_GEN3].get_feature_flags =
			qat_asym_crypto_feature_flags_get_gen1;
	qat_asym_gen_dev_ops[QAT_GEN3].set_session =
			qat_asym_crypto_set_session_gen1;
}
849