/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2022 Intel Corporation
 */

#ifndef _QAT_CRYPTO_PMD_GENS_H_
#define _QAT_CRYPTO_PMD_GENS_H_

#include <rte_cryptodev.h>
#include "qat_crypto.h"
#include "qat_sym_session.h"
#include "qat_sym.h"

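/*
 * Evaluates to non-zero for a CBC-mode session when the cipher algorithm is
 * AES (128/192/256) or 3DES and the requested cipher length is not a
 * multiple of the algorithm block size. Expects "ctx" and "cipher_param"
 * to be in scope at the expansion site.
 */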
#define AES_OR_3DES_MISALIGNED (ctx->qat_mode == ICP_QAT_HW_CIPHER_CBC_MODE && \
			((((ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128) || \
			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES192) || \
			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES256)) && \
			(cipher_param->cipher_length % ICP_QAT_HW_AES_BLK_SZ)) || \
			((ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) && \
			(cipher_param->cipher_length % ICP_QAT_HW_3DES_BLK_SZ))))

#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
	RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)

#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
	(ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
	ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))

#ifdef RTE_QAT_OPENSSL
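/*
 * Decrypt the trailing partial block for BPI processing with libcrypto:
 * ECB-encrypt the IV, then XOR the result with the source bytes.
 * Returns 0 on success, -EINVAL if the libcrypto call fails.
 */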
static __rte_always_inline int
op_bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
		uint8_t *iv, int ivlen, int srclen,
		void *bpi_ctx)
{
	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
	int encrypted_ivlen;
	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
	uint8_t *encr = encrypted_iv;

	/* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
								<= 0)
		goto cipher_decrypt_err;

	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
		*dst = *src ^ *encr;

	return 0;

cipher_decrypt_err:
	QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
	return -EINVAL;
}
#endif

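/*
 * BPI (DOCSIS) pre-processing: for decrypt operations, handle any trailing
 * partial block in software before the request is sent to the device.
 * Returns the number of bytes (whole blocks only) left for QAT to cipher.
 */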
static __rte_always_inline uint32_t
qat_bpicipher_preprocess(struct qat_sym_session *ctx,
				struct rte_crypto_op *op)
{
	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
	struct rte_crypto_sym_op *sym_op = op->sym;
	uint8_t last_block_len = block_len > 0 ?
			sym_op->cipher.data.length % block_len : 0;

	if (last_block_len && ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
		/* Decrypt last block */
		uint8_t *last_block, *dst, *iv;
		uint32_t last_block_offset = sym_op->cipher.data.offset +
				sym_op->cipher.data.length - last_block_len;
		last_block = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
						     last_block_offset);

		if (unlikely((sym_op->m_dst != NULL)
				&& (sym_op->m_dst != sym_op->m_src)))
			/* out-of-place operation (OOP) */
			dst = rte_pktmbuf_mtod_offset(sym_op->m_dst,
						      uint8_t *,
						      last_block_offset);
		else
			dst = last_block;

		if (last_block_len < sym_op->cipher.data.length)
			/* use previous block ciphertext as IV */
			iv = last_block - block_len;
		else
			/* runt block, i.e. less than one full block */
			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
					ctx->cipher_iv.offset);

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
			last_block, last_block_len);
		if (sym_op->m_dst != NULL)
			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst before pre-process:",
			dst, last_block_len);
#endif
#ifdef RTE_QAT_OPENSSL
		op_bpi_cipher_decrypt(last_block, dst, iv, block_len,
				last_block_len, ctx->bpi_ctx);
#else
		bpi_cipher_ipsec(last_block, dst, iv, last_block_len, ctx->expkey,
			ctx->mb_mgr, ctx->docsis_key_len);
#endif
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
			last_block, last_block_len);
		if (sym_op->m_dst != NULL)
			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
			dst, last_block_len);
#endif
	}

	return sym_op->cipher.data.length - last_block_len;
}

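/*
 * Return 1 if the session hash algorithm expresses auth offset/length in
 * bits (SNOW 3G UIA2, KASUMI F9, ZUC EIA3/ZUC-256), 0 if they are in bytes,
 * or -EINVAL if a bit-length algorithm is given non byte-aligned values.
 */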
static __rte_always_inline int
qat_auth_is_len_in_bits(struct qat_sym_session *ctx,
		struct rte_crypto_op *op)
{
	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 ||
		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_32 ||
		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_64 ||
		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_128) {
		if (unlikely((op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
				(op->sym->auth.data.length % BYTE_LENGTH != 0)))
			return -EINVAL;
		return 1;
	}
	return 0;
}

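/*
 * Return 1 if the session cipher algorithm expresses cipher offset/length in
 * bits (SNOW 3G UEA2, KASUMI, ZUC EEA3/ZUC-256), 0 if they are in bytes,
 * or -EINVAL if a bit-length algorithm is given non byte-aligned values.
 */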
static __rte_always_inline int
qat_cipher_is_len_in_bits(struct qat_sym_session *ctx,
		struct rte_crypto_op *op)
{
	if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
		ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
		ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 ||
		ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_ZUC_256) {
		if (unlikely((op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
			((op->sym->cipher.data.offset %
			BYTE_LENGTH) != 0)))
			return -EINVAL;
		return 1;
	}
	return 0;
}

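/*
 * Fill the source/destination addresses and lengths of a firmware request.
 * Flat buffers are referenced directly; multi-segment inputs are described
 * through the SGL tables held in the op cookie. Returns the total source
 * length on success, a negative value on error.
 */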
static __rte_always_inline int32_t
qat_sym_build_req_set_data(struct icp_qat_fw_la_bulk_req *req,
		void *opaque, struct qat_sym_op_cookie *cookie,
		struct rte_crypto_vec *src_vec, uint16_t n_src,
		struct rte_crypto_vec *dst_vec, uint16_t n_dst)
{
	struct qat_sgl *list;
	uint32_t i;
	uint32_t tl_src = 0, total_len_src, total_len_dst;
	uint64_t src_data_start = 0, dst_data_start = 0;
	int is_sgl = n_src > 1 || n_dst > 1;

	if (unlikely(n_src < 1 || n_src > QAT_SYM_SGL_MAX_NUMBER ||
			n_dst > QAT_SYM_SGL_MAX_NUMBER))
		return -1;

	if (likely(!is_sgl)) {
		src_data_start = src_vec[0].iova;
		tl_src = total_len_src =
				src_vec[0].len;
		if (unlikely(n_dst)) { /* oop */
			total_len_dst = dst_vec[0].len;

			dst_data_start = dst_vec[0].iova;
			if (unlikely(total_len_src != total_len_dst))
				return -EINVAL;
		} else {
			dst_data_start = src_data_start;
			total_len_dst = tl_src;
		}
	} else { /* sgl */
		total_len_dst = total_len_src = 0;

		ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
			QAT_COMN_PTR_TYPE_SGL);

		list = (struct qat_sgl *)&cookie->qat_sgl_src;
		for (i = 0; i < n_src; i++) {
			list->buffers[i].len = src_vec[i].len;
			list->buffers[i].resrvd = 0;
			list->buffers[i].addr = src_vec[i].iova;
			/* widen to 64-bit so the overflow check can trigger */
			if ((uint64_t)tl_src + src_vec[i].len > UINT32_MAX) {
				QAT_DP_LOG(ERR, "Message too long");
				return -1;
			}
			tl_src += src_vec[i].len;
		}

		list->num_bufs = i;
		src_data_start = cookie->qat_sgl_src_phys_addr;

		if (unlikely(n_dst > 0)) { /* oop sgl */
			uint32_t tl_dst = 0;

			list = (struct qat_sgl *)&cookie->qat_sgl_dst;

			for (i = 0; i < n_dst; i++) {
				list->buffers[i].len = dst_vec[i].len;
				list->buffers[i].resrvd = 0;
				list->buffers[i].addr = dst_vec[i].iova;
				if ((uint64_t)tl_dst + dst_vec[i].len > UINT32_MAX) {
					QAT_DP_LOG(ERR, "Message too long");
					return -ENOTSUP;
				}

				tl_dst += dst_vec[i].len;
			}

			if (tl_src != tl_dst)
				return -EINVAL;
			list->num_bufs = i;
			dst_data_start = cookie->qat_sgl_dst_phys_addr;
		} else
			dst_data_start = src_data_start;
	}

	req->comn_mid.src_data_addr = src_data_start;
	req->comn_mid.dest_data_addr = dst_data_start;
	req->comn_mid.src_length = total_len_src;
	req->comn_mid.dst_length = total_len_dst;
	req->comn_mid.opaque_data = (uintptr_t)opaque;

	return tl_src;
}

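/*
 * Convert a cipher-only rte_crypto_op into the vector form used by the
 * data-path helpers: resolve bit vs. byte lengths, run BPI (DOCSIS)
 * pre-processing when needed, set the cipher IV pointers and build the
 * in/out SGLs. Returns 0 on success, UINT64_MAX on error.
 */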
static __rte_always_inline uint64_t
qat_sym_convert_op_to_vec_cipher(struct rte_crypto_op *op,
		struct qat_sym_session *ctx,
		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
		struct rte_crypto_va_iova_ptr *cipher_iv,
		struct rte_crypto_va_iova_ptr *auth_iv_or_aad __rte_unused,
		struct rte_crypto_va_iova_ptr *digest __rte_unused)
{
	uint32_t cipher_len = 0, cipher_ofs = 0;
	int n_src = 0;
	int ret;

	ret = qat_cipher_is_len_in_bits(ctx, op);
	switch (ret) {
	case 1:
		cipher_len = op->sym->cipher.data.length >> 3;
		cipher_ofs = op->sym->cipher.data.offset >> 3;
		break;
	case 0:
#ifdef RTE_QAT_OPENSSL
		if (ctx->bpi_ctx) {
#else
		if (ctx->mb_mgr) {
#endif
			/* DOCSIS - only send complete blocks to device.
			 * Process any partial block using CFB mode.
			 * Even if 0 complete blocks, still send this to device
			 * to get into rx queue for post-process and dequeuing
			 */
			cipher_len = qat_bpicipher_preprocess(ctx, op);
			cipher_ofs = op->sym->cipher.data.offset;
		} else {
			cipher_len = op->sym->cipher.data.length;
			cipher_ofs = op->sym->cipher.data.offset;
		}
		break;
	default:
		QAT_DP_LOG(ERR,
	  "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return UINT64_MAX;
	}

	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
			ctx->cipher_iv.offset);
	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
			ctx->cipher_iv.offset);

	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, cipher_ofs,
			cipher_len, in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return UINT64_MAX;
	}

	in_sgl->num = n_src;

	/* Out-Of-Place operation */
	if (unlikely((op->sym->m_dst != NULL) &&
			(op->sym->m_dst != op->sym->m_src))) {
		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, cipher_ofs,
				cipher_len, out_sgl->vec,
				QAT_SYM_SGL_MAX_NUMBER);

		if ((n_dst < 0) || (n_dst > op->sym->m_dst->nb_segs)) {
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
			return UINT64_MAX;
		}

		out_sgl->num = n_dst;
	} else
		out_sgl->num = 0;

	return 0;
}

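/*
 * Convert an auth-only rte_crypto_op into vector form: resolve bit vs. byte
 * lengths, set the auth IV (wireless algorithms and AES-GMAC), build the
 * in/out SGLs and fill the digest pointer. Returns 0 on success,
 * UINT64_MAX on error.
 */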
static __rte_always_inline uint64_t
qat_sym_convert_op_to_vec_auth(struct rte_crypto_op *op,
		struct qat_sym_session *ctx,
		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
		struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
		struct rte_crypto_va_iova_ptr *auth_iv,
		struct rte_crypto_va_iova_ptr *digest,
		struct qat_sym_op_cookie *cookie)
{
	uint32_t auth_ofs = 0, auth_len = 0;
	int n_src, ret;

	ret = qat_auth_is_len_in_bits(ctx, op);
	switch (ret) {
	case 1:
		auth_ofs = op->sym->auth.data.offset >> 3;
		auth_len = op->sym->auth.data.length >> 3;
		auth_iv->va = rte_crypto_op_ctod_offset(op, void *,
				ctx->auth_iv.offset);
		auth_iv->iova = rte_crypto_op_ctophys_offset(op,
				ctx->auth_iv.offset);
		break;
	case 0:
		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
			/* AES-GMAC */
			auth_ofs = op->sym->auth.data.offset;
			auth_len = op->sym->auth.data.length;
			auth_iv->va = rte_crypto_op_ctod_offset(op, void *,
					ctx->auth_iv.offset);
			auth_iv->iova = rte_crypto_op_ctophys_offset(op,
					ctx->auth_iv.offset);
		} else {
			auth_ofs = op->sym->auth.data.offset;
			auth_len = op->sym->auth.data.length;
			auth_iv->va = NULL;
			auth_iv->iova = 0;
		}
		break;
	default:
		QAT_DP_LOG(ERR,
	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return UINT64_MAX;
	}

	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, auth_ofs,
			auth_len, in_sgl->vec,
			QAT_SYM_SGL_MAX_NUMBER);
	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return UINT64_MAX;
	}

	in_sgl->num = n_src;

	/* Out-Of-Place operation */
	if (unlikely((op->sym->m_dst != NULL) &&
			(op->sym->m_dst != op->sym->m_src))) {
		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, auth_ofs,
				auth_len, out_sgl->vec,
				QAT_SYM_SGL_MAX_NUMBER);

		if ((n_dst < 0) || (n_dst > op->sym->m_dst->nb_segs)) {
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
			return UINT64_MAX;
		}
		out_sgl->num = n_dst;
	} else
		out_sgl->num = 0;

	digest->va = (void *)op->sym->auth.digest.data;

	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL)
		digest->iova = cookie->digest_null_phys_addr;
	else
		digest->iova = op->sym->auth.digest.phys_addr;

	return 0;
}

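/*
 * Convert a cipher+auth chain rte_crypto_op into vector form. In addition to
 * the IV/digest pointers and SGLs, this computes the region covering both
 * the cipher and auth ranges, extending it over the digest when the
 * digest-encrypted (or BPI) conditions apply. Returns the packed
 * rte_crypto_sym_ofs value, or a negative value cast to uint64_t on error.
 */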
static __rte_always_inline uint64_t
qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op,
		struct qat_sym_session *ctx,
		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
		struct rte_crypto_va_iova_ptr *cipher_iv,
		struct rte_crypto_va_iova_ptr *auth_iv_or_aad,
		struct rte_crypto_va_iova_ptr *digest,
		struct qat_sym_op_cookie *cookie)
{
	union rte_crypto_sym_ofs ofs;
	uint32_t max_len = 0, oop_offset = 0;
	uint32_t cipher_len = 0, cipher_ofs = 0;
	uint32_t auth_len = 0, auth_ofs = 0;
	int is_oop = (op->sym->m_dst != NULL) &&
			(op->sym->m_dst != op->sym->m_src);
	int is_sgl = op->sym->m_src->nb_segs > 1;
	int is_bpi = 0;
	int n_src;
	int ret;

	if (unlikely(is_oop))
		is_sgl |= op->sym->m_dst->nb_segs > 1;

	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
			ctx->cipher_iv.offset);
	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
			ctx->cipher_iv.offset);
	auth_iv_or_aad->va = rte_crypto_op_ctod_offset(op, void *,
			ctx->auth_iv.offset);
	auth_iv_or_aad->iova = rte_crypto_op_ctophys_offset(op,
			ctx->auth_iv.offset);
	digest->va = (void *)op->sym->auth.digest.data;

	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL)
		digest->iova = cookie->digest_null_phys_addr;
	else
		digest->iova = op->sym->auth.digest.phys_addr;

	ret = qat_cipher_is_len_in_bits(ctx, op);
	switch (ret) {
	case 1:
		cipher_len = op->sym->cipher.data.length >> 3;
		cipher_ofs = op->sym->cipher.data.offset >> 3;
		break;
	case 0:
#ifdef RTE_QAT_OPENSSL
		if (ctx->bpi_ctx) {
#else
		if (ctx->mb_mgr) {
#endif
			cipher_len = qat_bpicipher_preprocess(ctx, op);
			cipher_ofs = op->sym->cipher.data.offset;
			is_bpi = 1;
		} else {
			cipher_len = op->sym->cipher.data.length;
			cipher_ofs = op->sym->cipher.data.offset;
		}
		break;
	default:
		QAT_DP_LOG(ERR,
	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	ret = qat_auth_is_len_in_bits(ctx, op);
	switch (ret) {
	case 1:
		auth_len = op->sym->auth.data.length >> 3;
		auth_ofs = op->sym->auth.data.offset >> 3;
		break;
	case 0:
		auth_len = op->sym->auth.data.length;
		auth_ofs = op->sym->auth.data.offset;
		break;
	default:
		QAT_DP_LOG(ERR,
	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	max_len = RTE_MAX(cipher_ofs + cipher_len, auth_ofs + auth_len);

	/* If OOP, we need to keep in mind that offset needs to start where
	 * cipher/auth starts, namely no offset on the smaller one
	 */
	if (is_oop) {
		oop_offset = RTE_MIN(auth_ofs, cipher_ofs);
		auth_ofs -= oop_offset;
		cipher_ofs -= oop_offset;
		max_len -= oop_offset;
	}

	/* digest in buffer check. Needed only for wireless algos
	 * or combined cipher-crc operations
	 */
	if (ret == 1 || is_bpi) {
		/* Handle digest-encrypted cases, i.e.
		 * auth-gen-then-cipher-encrypt and
		 * cipher-decrypt-then-auth-verify
		 */
		uint64_t auth_end_iova;

		if (unlikely(is_sgl)) {
			uint32_t remaining_off = auth_ofs + auth_len;
			struct rte_mbuf *sgl_buf = (is_oop ? op->sym->m_dst :
				op->sym->m_src);

			while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
					&& sgl_buf->next != NULL) {
				remaining_off -= rte_pktmbuf_data_len(sgl_buf);
				sgl_buf = sgl_buf->next;
			}

			auth_end_iova = (uint64_t)rte_pktmbuf_iova_offset(
				sgl_buf, remaining_off);
		} else
			auth_end_iova = (is_oop ?
				rte_pktmbuf_iova(op->sym->m_dst) :
				rte_pktmbuf_iova(op->sym->m_src)) + auth_ofs +
					auth_len;

		/* Then check if digest-encrypted conditions are met */
		if (((auth_ofs + auth_len < cipher_ofs + cipher_len) &&
				(digest->iova == auth_end_iova)) ||
#ifdef RTE_QAT_OPENSSL
				ctx->bpi_ctx)
#else
				ctx->mb_mgr)
#endif
			max_len = RTE_MAX(max_len, auth_ofs + auth_len +
					ctx->digest_length);
	}
	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, oop_offset, max_len,
			in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
	if (unlikely(n_src < 0 || n_src > op->sym->m_src->nb_segs)) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return -1;
	}
	in_sgl->num = n_src;

	if (unlikely((op->sym->m_dst != NULL) &&
			(op->sym->m_dst != op->sym->m_src))) {
		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, oop_offset,
				max_len, out_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);

		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
			return -1;
		}
		out_sgl->num = n_dst;
	} else
		out_sgl->num = 0;

	ofs.ofs.cipher.head = cipher_ofs;
	ofs.ofs.cipher.tail = max_len - cipher_ofs - cipher_len;
	ofs.ofs.auth.head = auth_ofs;
	ofs.ofs.auth.tail = max_len - auth_ofs - auth_len;

	return ofs.raw;
}

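/*
 * Convert an AEAD rte_crypto_op into vector form: set the cipher IV, AAD and
 * digest pointers and build the in/out SGLs. Returns 0 on success,
 * UINT64_MAX on error.
 */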
static __rte_always_inline uint64_t
qat_sym_convert_op_to_vec_aead(struct rte_crypto_op *op,
		struct qat_sym_session *ctx,
		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
		struct rte_crypto_va_iova_ptr *cipher_iv,
		struct rte_crypto_va_iova_ptr *auth_iv_or_aad,
		struct rte_crypto_va_iova_ptr *digest)
{
	uint32_t cipher_len = 0, cipher_ofs = 0;
	int32_t n_src = 0;

	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
			ctx->cipher_iv.offset);
	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
			ctx->cipher_iv.offset);
	auth_iv_or_aad->va = (void *)op->sym->aead.aad.data;
	auth_iv_or_aad->iova = op->sym->aead.aad.phys_addr;
	digest->va = (void *)op->sym->aead.digest.data;
	digest->iova = op->sym->aead.digest.phys_addr;

	cipher_len = op->sym->aead.data.length;
	cipher_ofs = op->sym->aead.data.offset;

	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, cipher_ofs, cipher_len,
			in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return UINT64_MAX;
	}
	in_sgl->num = n_src;

	/* Out-Of-Place operation */
	if (unlikely((op->sym->m_dst != NULL) &&
			(op->sym->m_dst != op->sym->m_src))) {
		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, cipher_ofs,
				cipher_len, out_sgl->vec,
				QAT_SYM_SGL_MAX_NUMBER);
		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
			return UINT64_MAX;
		}

		out_sgl->num = n_dst;
	} else
		out_sgl->num = 0;

	return 0;
}

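/*
 * Re-pack the tail of a ZUC-256 IV in place: bytes 17-24, which carry only
 * 6 significant bits each, are packed into the 6 bytes at offsets 17-22,
 * with byte 16 left unchanged.
 */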
static inline void
zuc256_modify_iv(uint8_t *iv)
{
	/* zero-init: only the first 7 bytes are filled below */
	uint8_t iv_tmp[8] = {0};

	iv_tmp[0] = iv[16];
	/* Pack the last 8 bytes of the IV into 6 bytes,
	 * discarding the 2 most significant bits of each byte.
	 */
	iv_tmp[1] = (((iv[17] & 0x3f) << 2) | ((iv[18] >> 4) & 0x3));
	iv_tmp[2] = (((iv[18] & 0xf) << 4) | ((iv[19] >> 2) & 0xf));
	iv_tmp[3] = (((iv[19] & 0x3) << 6) | (iv[20] & 0x3f));

	iv_tmp[4] = (((iv[21] & 0x3f) << 2) | ((iv[22] >> 4) & 0x3));
	iv_tmp[5] = (((iv[22] & 0xf) << 4) | ((iv[23] >> 2) & 0xf));
	iv_tmp[6] = (((iv[23] & 0x3) << 6) | (iv[24] & 0x3f));

	memcpy(iv + 16, iv_tmp, 8);
}

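/*
 * Set the cipher IV in the request: copy it inline into the request IV array
 * when it fits, otherwise pass it by 64-bit pointer and flag that in the
 * request header.
 */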
static __rte_always_inline void
qat_set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
		struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
		struct icp_qat_fw_la_bulk_req *qat_req)
{
	/* copy IV into request if it fits */
	if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
		rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
				iv_len);
	else {
		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
				qat_req->comn_hdr.serv_specif_flags,
				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
		cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
	}
}

static __rte_always_inline void
qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
{
	uint32_t i;

	for (i = 0; i < n; i++)
		sta[i] = status;
}

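/*
 * Fill the cipher parameters of a GEN1 request. On AES/3DES CBC length
 * misalignment the request is turned into a NULL operation and the cookie
 * status is set to RTE_CRYPTO_OP_STATUS_INVALID_ARGS.
 */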
static __rte_always_inline void
enqueue_one_cipher_job_gen1(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *iv,
	union rte_crypto_sym_ofs ofs, uint32_t data_len,
	struct qat_sym_op_cookie *cookie)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param;

	cipher_param = (void *)&req->serv_specif_rqpars;

	/* cipher IV */
	qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
	cipher_param->cipher_offset = ofs.ofs.cipher.head;
	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
			ofs.ofs.cipher.tail;

	if (AES_OR_3DES_MISALIGNED) {
		QAT_LOG(DEBUG,
	  "Input cipher buffer misalignment detected and change job as NULL operation");
		struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
		header->service_type = ICP_QAT_FW_COMN_REQ_NULL;
		header->service_cmd_id = ICP_QAT_FW_NULL_REQ_SERV_ID;
		cookie->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
	}
}

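/*
 * Fill the auth parameters of a GEN1 request: offset, length, digest address
 * and the per-algorithm auth IV/AAD handling (wireless algorithms, GMAC-style
 * Galois hashes, SM3 prefix).
 */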
static __rte_always_inline void
enqueue_one_auth_job_gen1(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;

	cipher_param = (void *)&req->serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param +
			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);

	auth_param->auth_off = ofs.ofs.auth.head;
	auth_param->auth_len = data_len - ofs.ofs.auth.head -
			ofs.ofs.auth.tail;
	auth_param->auth_res_addr = digest->iova;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
	case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_32:
	case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_64:
	case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_128:
		auth_param->u1.aad_adr = auth_iv->iova;
		break;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
			req->comn_hdr.serv_specif_flags,
				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
		rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
				ctx->auth_iv.length);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SM3:
		if (ctx->auth_mode == ICP_QAT_HW_AUTH_MODE0)
			auth_param->u1.aad_adr = 0;
		else
			auth_param->u1.aad_adr = ctx->prefix_paddr;
		break;
	default:
		break;
	}
}

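/*
 * Fill both cipher and auth parameters of a GEN1 chain request. Misaligned
 * AES/3DES CBC lengths turn the job into a NULL operation, and the
 * digest-in-buffer flag is set when the digest directly follows the
 * authenticated region (digest-encrypted case) or for BPI sessions.
 */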
static __rte_always_inline int
enqueue_one_chain_job_gen1(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_vec *src_vec,
	uint16_t n_src_vecs,
	struct rte_crypto_vec *dst_vec,
	uint16_t n_dst_vecs,
	struct rte_crypto_va_iova_ptr *cipher_iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	union rte_crypto_sym_ofs ofs, uint32_t data_len,
	struct qat_sym_op_cookie *cookie)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct rte_crypto_vec *cvec = n_dst_vecs > 0 ?
			dst_vec : src_vec;
	rte_iova_t auth_iova_end;
	int cipher_len, auth_len;
	int is_sgl = n_src_vecs > 1 || n_dst_vecs > 1;

	cipher_param = (void *)&req->serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param +
			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);

	cipher_len = data_len - ofs.ofs.cipher.head -
			ofs.ofs.cipher.tail;
	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;

	if (unlikely(cipher_len < 0 || auth_len < 0))
		return -1;

	cipher_param->cipher_offset = ofs.ofs.cipher.head;
	cipher_param->cipher_length = cipher_len;
	qat_set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);

	auth_param->auth_off = ofs.ofs.auth.head;
	auth_param->auth_len = auth_len;
	auth_param->auth_res_addr = digest->iova;
	/* Input cipher length alignment requirement for AES-CBC and 3DES-CBC.
	 * For 3DES-CBC the ESP payload size must be 8-byte aligned, and for
	 * AES-CBC it must be 16-byte aligned. This alignment should be
	 * guaranteed by the ESP padding field as per RFC 4303. If it is not,
	 * QAT passes the chain job through as a NULL cipher and NULL auth
	 * operation and reports a misalignment error.
	 */
	if (AES_OR_3DES_MISALIGNED) {
		QAT_LOG(DEBUG,
	  "Input cipher buffer misalignment detected and change job as NULL operation");
		struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
		header->service_type = ICP_QAT_FW_COMN_REQ_NULL;
		header->service_cmd_id = ICP_QAT_FW_NULL_REQ_SERV_ID;
		cookie->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -1;
	}

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
	case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_32:
	case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_64:
	case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_128:
		auth_param->u1.aad_adr = auth_iv->iova;
		break;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		break;
	case ICP_QAT_HW_AUTH_ALGO_SM3:
		if (ctx->auth_mode == ICP_QAT_HW_AUTH_MODE0)
			auth_param->u1.aad_adr = 0;
		else
			auth_param->u1.aad_adr = ctx->prefix_paddr;
		break;
	default:
		break;
	}

	if (unlikely(is_sgl)) {
		/* sgl */
		int i = n_dst_vecs ? n_dst_vecs : n_src_vecs;
		uint32_t remaining_off = data_len - ofs.ofs.auth.tail;

		while (remaining_off >= cvec->len && i >= 1) {
			i--;
			remaining_off -= cvec->len;
			if (i)
				cvec++;
		}

		auth_iova_end = cvec->iova + remaining_off;
	} else
		auth_iova_end = cvec[0].iova + auth_param->auth_off +
			auth_param->auth_len;

	/* Then check if digest-encrypted conditions are met */
	if (((auth_param->auth_off + auth_param->auth_len <
		cipher_param->cipher_offset + cipher_param->cipher_length) &&
			(digest->iova == auth_iova_end)) ||
#ifdef RTE_QAT_OPENSSL
			ctx->bpi_ctx) {
#else
			ctx->mb_mgr) {
#endif
		/* Handle partial digest encryption */
		if (cipher_param->cipher_offset + cipher_param->cipher_length <
			auth_param->auth_off + auth_param->auth_len +
				ctx->digest_length && !is_sgl)
			req->comn_mid.dst_length = req->comn_mid.src_length =
				auth_param->auth_off + auth_param->auth_len +
					ctx->digest_length;
		struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
			ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	}

	return 0;
}

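/*
 * Fill the cipher/auth parameters of a GEN1 AEAD request. For GCM the IV is
 * copied inline into the request; for CCM (AES-CBC-MAC hash) the B0 block
 * and AAD length field are built in the AAD buffer and the nonce is copied
 * into the request IV array.
 */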
static __rte_always_inline void
enqueue_one_aead_job_gen1(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param =
		(void *)&req->serv_specif_rqpars;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(void *)((uint8_t *)&req->serv_specif_rqpars +
		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
	uint8_t *aad_data;
	uint8_t aad_ccm_real_len;
	uint8_t aad_len_field_sz;
	uint32_t msg_len_be;
	rte_iova_t aad_iova = 0;
	uint8_t q;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
			req->comn_hdr.serv_specif_flags,
				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
		rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
				ctx->cipher_iv.length);
		aad_iova = aad->iova;
		break;
	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
		aad_data = aad->va;
		aad_iova = aad->iova;
		aad_ccm_real_len = 0;
		aad_len_field_sz = 0;
		msg_len_be = rte_bswap32((uint32_t)data_len -
				ofs.ofs.cipher.head);

		if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
			aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
			aad_ccm_real_len = ctx->aad_len -
				ICP_QAT_HW_CCM_AAD_B0_LEN -
				ICP_QAT_HW_CCM_AAD_LEN_INFO;
		} else {
			aad_data = iv->va;
			aad_iova = iv->iova;
		}

		q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
		aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
			aad_len_field_sz, ctx->digest_length, q);
		if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
			memcpy(aad_data + ctx->cipher_iv.length +
				ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
				(uint8_t *)&msg_len_be,
				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
		} else {
			memcpy(aad_data + ctx->cipher_iv.length +
				ICP_QAT_HW_CCM_NONCE_OFFSET,
				(uint8_t *)&msg_len_be +
				(ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
				- q), q);
		}

		if (aad_len_field_sz > 0) {
			*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
				rte_bswap16(aad_ccm_real_len);

			if ((aad_ccm_real_len + aad_len_field_sz)
				% ICP_QAT_HW_CCM_AAD_B0_LEN) {
				uint8_t pad_len = 0;
				uint8_t pad_idx = 0;

				pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
					((aad_ccm_real_len +
					aad_len_field_sz) %
					ICP_QAT_HW_CCM_AAD_B0_LEN);
				pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
					aad_ccm_real_len +
					aad_len_field_sz;
				memset(&aad_data[pad_idx], 0, pad_len);
			}
		}

		rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
			+ ICP_QAT_HW_CCM_NONCE_OFFSET,
			(uint8_t *)iv->va +
			ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
		*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
			q - ICP_QAT_HW_CCM_NONCE_OFFSET;

		if (ctx->aad_len > 0) {
			rte_memcpy((uint8_t *)aad->va +
					ICP_QAT_HW_CCM_NONCE_OFFSET,
				(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
				ctx->cipher_iv.length);
		}
		break;
	default:
		break;
	}

	cipher_param->cipher_offset = ofs.ofs.cipher.head;
	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
			ofs.ofs.cipher.tail;
	auth_param->auth_off = ofs.ofs.cipher.head;
	auth_param->auth_len = cipher_param->cipher_length;
	auth_param->auth_res_addr = digest->iova;
	auth_param->u1.aad_adr = aad_iova;
}

extern struct rte_cryptodev_ops qat_sym_crypto_ops_gen1;
extern struct rte_cryptodev_ops qat_asym_crypto_ops_gen1;

/* -----------------GEN 1 sym crypto op data path APIs ---------------- */
int
qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
		uint8_t *out_msg, void *op_cookie);

int
qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
		uint8_t *out_msg, void *op_cookie);

int
qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx,
		uint8_t *out_msg, void *op_cookie);

int
qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
		uint8_t *out_msg, void *op_cookie);

/* -----------------GEN 1 sym crypto raw data path APIs ---------------- */
int
qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest __rte_unused,
	struct rte_crypto_va_iova_ptr *aad __rte_unused,
	void *user_data);

uint32_t
qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status);

int
qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv __rte_unused,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	void *user_data);

uint32_t
qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status);

int
qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *cipher_iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	void *user_data);

uint32_t
qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status);

int
qat_sym_dp_enqueue_single_aead_gen1(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	void *user_data);

uint32_t
qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status);

void *
qat_sym_dp_dequeue_single_gen1(void *qp_data, uint8_t *drv_ctx,
	int *dequeue_status, enum rte_crypto_op_status *op_status);

uint32_t
qat_sym_dp_dequeue_burst_gen1(void *qp_data, uint8_t *drv_ctx,
	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
	uint32_t max_nb_to_dequeue,
	rte_cryptodev_raw_post_dequeue_t post_dequeue,
	void **out_user_data, uint8_t is_user_data_array,
	uint32_t *n_success_jobs, int *return_status);

int
qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n);

int
qat_sym_dp_dequeue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n);

int
qat_sym_dp_enqueue_done_gen4(void *qp_data, uint8_t *drv_ctx, uint32_t n);

int
qat_sym_dp_dequeue_done_gen4(void *qp_data, uint8_t *drv_ctx, uint32_t n);

int
qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx);

/* -----------------GENx control path APIs ---------------- */
uint64_t
qat_sym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);

int
qat_sym_crypto_set_session_gen1(void *cryptodev, void *session);

int
qat_sym_crypto_set_session_gen4(void *cryptodev, void *session);

void
qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
		uint8_t hash_flag);

int
qat_sym_configure_raw_dp_ctx_gen4(void *_raw_dp_ctx, void *_ctx);

int
qat_asym_crypto_cap_get_gen1(struct qat_cryptodev_private *internals,
			const char *capa_memz_name, const uint16_t slice_map);

uint64_t
qat_asym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);

int
qat_asym_crypto_set_session_gen1(void *cryptodev, void *session);

extern struct rte_security_ops security_qat_ops_gen1;

void *
qat_sym_create_security_gen1(void *cryptodev);

#endif /* _QAT_CRYPTO_PMD_GENS_H_ */