/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2022 Intel Corporation
 */

#ifndef _QAT_CRYPTO_PMD_GENS_H_
#define _QAT_CRYPTO_PMD_GENS_H_

#include <rte_cryptodev.h>
#include "qat_crypto.h"
#include "qat_sym_session.h"
#include "qat_sym.h"

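/* True when a CBC-mode AES (128/192/256) or 3DES cipher length is not a
 * multiple of the algorithm block size. Relies on "ctx" and "cipher_param"
 * being in scope at the point of use.
 */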
#define AES_OR_3DES_MISALIGNED (ctx->qat_mode == ICP_QAT_HW_CIPHER_CBC_MODE && \
			((((ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128) || \
			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES192) || \
			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES256)) && \
			(cipher_param->cipher_length % ICP_QAT_HW_AES_BLK_SZ)) || \
			((ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) && \
			(cipher_param->cipher_length % ICP_QAT_HW_3DES_BLK_SZ))))
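
/* Number of requests that can still be enqueued on ring q for a burst of n,
 * with c already accounted for by the caller.
 */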
#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
	RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)

#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
	(ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
	ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))

#ifdef RTE_QAT_OPENSSL
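/* Decrypt a trailing (partial) BPI block in CFB fashion: ECB-encrypt the IV
 * with the libcrypto context, then XOR the result into the source bytes.
 * Returns 0 on success, -EINVAL if libcrypto fails.
 */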
static __rte_always_inline int
op_bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
		uint8_t *iv, int ivlen, int srclen,
		void *bpi_ctx)
{
	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
	int encrypted_ivlen;
	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
	uint8_t *encr = encrypted_iv;

	/* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
								<= 0)
		goto cipher_decrypt_err;

	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
		*dst = *src ^ *encr;

	return 0;

cipher_decrypt_err:
	QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
	return -EINVAL;
}
#endif

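/* Pre-process a DOCSIS BPI op: when decrypting, handle any trailing partial
 * cipher block in software before the op reaches the device. Returns the
 * length of the whole blocks left for the hardware to process.
 */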
static __rte_always_inline uint32_t
qat_bpicipher_preprocess(struct qat_sym_session *ctx,
				struct rte_crypto_op *op)
{
	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
	struct rte_crypto_sym_op *sym_op = op->sym;
	uint8_t last_block_len = block_len > 0 ?
			sym_op->cipher.data.length % block_len : 0;

	if (last_block_len && ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
		/* Decrypt last block */
		uint8_t *last_block, *dst, *iv;
		uint32_t last_block_offset = sym_op->cipher.data.offset +
				sym_op->cipher.data.length - last_block_len;
		last_block = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
						     last_block_offset);

		if (unlikely((sym_op->m_dst != NULL)
				&& (sym_op->m_dst != sym_op->m_src)))
			/* out-of-place operation (OOP) */
			dst = rte_pktmbuf_mtod_offset(sym_op->m_dst,
						      uint8_t *,
						      last_block_offset);
		else
			dst = last_block;

		if (last_block_len < sym_op->cipher.data.length)
			/* use previous block ciphertext as IV */
			iv = last_block - block_len;
		else
			/* runt block, i.e. less than one full block */
			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
					ctx->cipher_iv.offset);

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
			last_block, last_block_len);
		if (sym_op->m_dst != NULL)
			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst before pre-process:",
			dst, last_block_len);
#endif
#ifdef RTE_QAT_OPENSSL
		op_bpi_cipher_decrypt(last_block, dst, iv, block_len,
				last_block_len, ctx->bpi_ctx);
#else
		bpi_cipher_ipsec(last_block, dst, iv, last_block_len, ctx->expkey,
			ctx->mb_mgr, ctx->docsis_key_len);
#endif
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
			last_block, last_block_len);
		if (sym_op->m_dst != NULL)
			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
			dst, last_block_len);
#endif
	}

	return sym_op->cipher.data.length - last_block_len;
}

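/* Return 1 if the session's hash algorithm takes the auth offset/length in
 * bits (SNOW 3G, KASUMI, ZUC), 0 if in bytes, or -EINVAL when the bit values
 * are not byte aligned.
 */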
static __rte_always_inline int
qat_auth_is_len_in_bits(struct qat_sym_session *ctx,
		struct rte_crypto_op *op)
{
	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 ||
		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_32 ||
		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_64 ||
		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_128) {
		if (unlikely((op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
				(op->sym->auth.data.length % BYTE_LENGTH != 0)))
			return -EINVAL;
		return 1;
	}
	return 0;
}

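/* Same check as above for the cipher algorithm: 1 when the cipher
 * offset/length are in bits, 0 when in bytes, -EINVAL when the bit values
 * are not byte aligned.
 */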
static __rte_always_inline int
qat_cipher_is_len_in_bits(struct qat_sym_session *ctx,
		struct rte_crypto_op *op)
{
	if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
		ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
		ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 ||
		ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_ZUC_256) {
		if (unlikely((op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
			((op->sym->cipher.data.offset %
			BYTE_LENGTH) != 0)))
			return -EINVAL;
		return 1;
	}
	return 0;
}

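/* Fill the source/destination addresses and lengths of a firmware request,
 * building SGLs in the op cookie when more than one segment is involved.
 * Returns the total source length, or a negative value on error.
 */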
static __rte_always_inline int32_t
qat_sym_build_req_set_data(struct icp_qat_fw_la_bulk_req *req,
		void *opaque, struct qat_sym_op_cookie *cookie,
		struct rte_crypto_vec *src_vec, uint16_t n_src,
		struct rte_crypto_vec *dst_vec, uint16_t n_dst)
{
	struct qat_sgl *list;
	uint32_t i;
	uint32_t tl_src = 0, total_len_src, total_len_dst;
	uint64_t src_data_start = 0, dst_data_start = 0;
	int is_sgl = n_src > 1 || n_dst > 1;

	if (unlikely(n_src < 1 || n_src > QAT_SYM_SGL_MAX_NUMBER ||
			n_dst > QAT_SYM_SGL_MAX_NUMBER))
		return -1;

	if (likely(!is_sgl)) {
		src_data_start = src_vec[0].iova;
		tl_src = total_len_src =
				src_vec[0].len;
		if (unlikely(n_dst)) { /* oop */
			total_len_dst = dst_vec[0].len;

			dst_data_start = dst_vec[0].iova;
			if (unlikely(total_len_src != total_len_dst))
				return -EINVAL;
		} else {
			dst_data_start = src_data_start;
			total_len_dst = tl_src;
		}
	} else { /* sgl */
		total_len_dst = total_len_src = 0;

		ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
			QAT_COMN_PTR_TYPE_SGL);

		list = (struct qat_sgl *)&cookie->qat_sgl_src;
		for (i = 0; i < n_src; i++) {
			list->buffers[i].len = src_vec[i].len;
			list->buffers[i].resrvd = 0;
			list->buffers[i].addr = src_vec[i].iova;
			/* widen to 64 bits so the overflow check cannot wrap */
			if ((uint64_t)tl_src + src_vec[i].len > UINT32_MAX) {
				QAT_DP_LOG(ERR, "Message too long");
				return -1;
			}
			tl_src += src_vec[i].len;
		}

		list->num_bufs = i;
		src_data_start = cookie->qat_sgl_src_phys_addr;

		if (unlikely(n_dst > 0)) { /* oop sgl */
			uint32_t tl_dst = 0;

			list = (struct qat_sgl *)&cookie->qat_sgl_dst;

			for (i = 0; i < n_dst; i++) {
				list->buffers[i].len = dst_vec[i].len;
				list->buffers[i].resrvd = 0;
				list->buffers[i].addr = dst_vec[i].iova;
				/* widen to 64 bits so the check cannot wrap */
				if ((uint64_t)tl_dst + dst_vec[i].len > UINT32_MAX) {
					QAT_DP_LOG(ERR, "Message too long");
					return -ENOTSUP;
				}

				tl_dst += dst_vec[i].len;
			}

			if (tl_src != tl_dst)
				return -EINVAL;
			list->num_bufs = i;
			dst_data_start = cookie->qat_sgl_dst_phys_addr;
		} else
			dst_data_start = src_data_start;
	}

	req->comn_mid.src_data_addr = src_data_start;
	req->comn_mid.dest_data_addr = dst_data_start;
	req->comn_mid.src_length = total_len_src;
	req->comn_mid.dst_length = total_len_dst;
	req->comn_mid.opaque_data = (uintptr_t)opaque;

	return tl_src;
}

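/* Convert a cipher-only op to the vector form used by the request builders:
 * fill the in/out SGLs and the cipher IV pointers. Returns 0 on success or
 * UINT64_MAX on error, with op->status set accordingly.
 */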
static __rte_always_inline uint64_t
qat_sym_convert_op_to_vec_cipher(struct rte_crypto_op *op,
		struct qat_sym_session *ctx,
		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
		struct rte_crypto_va_iova_ptr *cipher_iv,
		struct rte_crypto_va_iova_ptr *auth_iv_or_aad __rte_unused,
		struct rte_crypto_va_iova_ptr *digest __rte_unused)
{
	uint32_t cipher_len = 0, cipher_ofs = 0;
	int n_src = 0;
	int ret;

	ret = qat_cipher_is_len_in_bits(ctx, op);
	switch (ret) {
	case 1:
		cipher_len = op->sym->cipher.data.length >> 3;
		cipher_ofs = op->sym->cipher.data.offset >> 3;
		break;
	case 0:

#ifdef RTE_QAT_OPENSSL
		if (ctx->bpi_ctx) {
#else
		if (ctx->mb_mgr) {
#endif
			/* DOCSIS - only send complete blocks to the device;
			 * any partial block is processed in software using
			 * CFB mode. Even with zero complete blocks, the op is
			 * still sent so it enters the RX queue for
			 * post-processing and dequeue.
			 */
			cipher_len = qat_bpicipher_preprocess(ctx, op);
			cipher_ofs = op->sym->cipher.data.offset;
		} else {
			cipher_len = op->sym->cipher.data.length;
			cipher_ofs = op->sym->cipher.data.offset;
		}
		break;
	default:
		QAT_DP_LOG(ERR,
	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return UINT64_MAX;
	}

	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
			ctx->cipher_iv.offset);
	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
			ctx->cipher_iv.offset);

	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, cipher_ofs,
			cipher_len, in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return UINT64_MAX;
	}

	in_sgl->num = n_src;

	/* Out-Of-Place operation */
	if (unlikely((op->sym->m_dst != NULL) &&
			(op->sym->m_dst != op->sym->m_src))) {
		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, cipher_ofs,
				cipher_len, out_sgl->vec,
				QAT_SYM_SGL_MAX_NUMBER);

		if ((n_dst < 0) || (n_dst > op->sym->m_dst->nb_segs)) {
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
			return UINT64_MAX;
		}

		out_sgl->num = n_dst;
	} else
		out_sgl->num = 0;

	return 0;
}

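/* Convert an auth-only op to vector form: fill the in/out SGLs, the auth IV
 * (wireless and GMAC cases) and the digest pointers. Returns 0 on success or
 * UINT64_MAX on error, with op->status set accordingly.
 */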
static __rte_always_inline uint64_t
qat_sym_convert_op_to_vec_auth(struct rte_crypto_op *op,
		struct qat_sym_session *ctx,
		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
		struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
		struct rte_crypto_va_iova_ptr *auth_iv,
		struct rte_crypto_va_iova_ptr *digest,
		struct qat_sym_op_cookie *cookie)
{
	uint32_t auth_ofs = 0, auth_len = 0;
	int n_src, ret;

	ret = qat_auth_is_len_in_bits(ctx, op);
	switch (ret) {
	case 1:
		auth_ofs = op->sym->auth.data.offset >> 3;
		auth_len = op->sym->auth.data.length >> 3;
		auth_iv->va = rte_crypto_op_ctod_offset(op, void *,
				ctx->auth_iv.offset);
		auth_iv->iova = rte_crypto_op_ctophys_offset(op,
				ctx->auth_iv.offset);
		break;
	case 0:
		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
			/* AES-GMAC */
			auth_ofs = op->sym->auth.data.offset;
			auth_len = op->sym->auth.data.length;
			auth_iv->va = rte_crypto_op_ctod_offset(op, void *,
					ctx->auth_iv.offset);
			auth_iv->iova = rte_crypto_op_ctophys_offset(op,
					ctx->auth_iv.offset);
		} else {
			auth_ofs = op->sym->auth.data.offset;
			auth_len = op->sym->auth.data.length;
			auth_iv->va = NULL;
			auth_iv->iova = 0;
		}
		break;
	default:
		QAT_DP_LOG(ERR,
	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return UINT64_MAX;
	}

	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, auth_ofs,
			auth_len, in_sgl->vec,
			QAT_SYM_SGL_MAX_NUMBER);
	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return UINT64_MAX;
	}

	in_sgl->num = n_src;

	/* Out-Of-Place operation */
	if (unlikely((op->sym->m_dst != NULL) &&
			(op->sym->m_dst != op->sym->m_src))) {
		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, auth_ofs,
				auth_len, out_sgl->vec,
				QAT_SYM_SGL_MAX_NUMBER);

		if ((n_dst < 0) || (n_dst > op->sym->m_dst->nb_segs)) {
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
			return UINT64_MAX;
		}
		out_sgl->num = n_dst;
	} else
		out_sgl->num = 0;

	digest->va = (void *)op->sym->auth.digest.data;

	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL)
		digest->iova = cookie->digest_null_phys_addr;
	else
		digest->iova = op->sym->auth.digest.phys_addr;

	return 0;
}

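/* Convert a cipher + auth chain op to vector form and work out the region the
 * device must see, including the digest-encrypted cases. Returns the packed
 * rte_crypto_sym_ofs head/tail offsets on success; on error, op->status is
 * set and an error value is returned.
 */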
static __rte_always_inline uint64_t
qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op,
		struct qat_sym_session *ctx,
		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
		struct rte_crypto_va_iova_ptr *cipher_iv,
		struct rte_crypto_va_iova_ptr *auth_iv_or_aad,
		struct rte_crypto_va_iova_ptr *digest,
		struct qat_sym_op_cookie *cookie)
{
	union rte_crypto_sym_ofs ofs;
	uint32_t max_len = 0;
	uint32_t cipher_len = 0, cipher_ofs = 0;
	uint32_t auth_len = 0, auth_ofs = 0;
	int is_oop = (op->sym->m_dst != NULL) &&
			(op->sym->m_dst != op->sym->m_src);
	int is_sgl = op->sym->m_src->nb_segs > 1;
	int is_bpi = 0;
	int n_src;
	int ret;

	if (unlikely(is_oop))
		is_sgl |= op->sym->m_dst->nb_segs > 1;

	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
			ctx->cipher_iv.offset);
	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
			ctx->cipher_iv.offset);
	auth_iv_or_aad->va = rte_crypto_op_ctod_offset(op, void *,
			ctx->auth_iv.offset);
	auth_iv_or_aad->iova = rte_crypto_op_ctophys_offset(op,
			ctx->auth_iv.offset);
	digest->va = (void *)op->sym->auth.digest.data;

	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL)
		digest->iova = cookie->digest_null_phys_addr;
	else
		digest->iova = op->sym->auth.digest.phys_addr;

	ret = qat_cipher_is_len_in_bits(ctx, op);
	switch (ret) {
	case 1:
		cipher_len = op->sym->cipher.data.length >> 3;
		cipher_ofs = op->sym->cipher.data.offset >> 3;
		break;
	case 0:
#ifdef RTE_QAT_OPENSSL
		if (ctx->bpi_ctx) {
#else
		if (ctx->mb_mgr) {
#endif
			cipher_len = qat_bpicipher_preprocess(ctx, op);
			cipher_ofs = op->sym->cipher.data.offset;
			is_bpi = 1;
		} else {
			cipher_len = op->sym->cipher.data.length;
			cipher_ofs = op->sym->cipher.data.offset;
		}
		break;
	default:
		QAT_DP_LOG(ERR,
	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	ret = qat_auth_is_len_in_bits(ctx, op);
	switch (ret) {
	case 1:
		auth_len = op->sym->auth.data.length >> 3;
		auth_ofs = op->sym->auth.data.offset >> 3;
		break;
	case 0:
		auth_len = op->sym->auth.data.length;
		auth_ofs = op->sym->auth.data.offset;
		break;
	default:
		QAT_DP_LOG(ERR,
	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	max_len = RTE_MAX(cipher_ofs + cipher_len, auth_ofs + auth_len);

	/* Digest-in-buffer check, needed only for wireless algorithms
	 * and combined cipher-CRC operations.
	 */
	if (ret == 1 || is_bpi) {
		/* Handle digest-encrypted cases, i.e.
		 * auth-gen-then-cipher-encrypt and
		 * cipher-decrypt-then-auth-verify
		 */
		uint64_t auth_end_iova;

		if (unlikely(is_sgl)) {
			uint32_t remaining_off = auth_ofs + auth_len;
			struct rte_mbuf *sgl_buf = (is_oop ? op->sym->m_dst :
				op->sym->m_src);

			while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
					&& sgl_buf->next != NULL) {
				remaining_off -= rte_pktmbuf_data_len(sgl_buf);
				sgl_buf = sgl_buf->next;
			}

			auth_end_iova = (uint64_t)rte_pktmbuf_iova_offset(
				sgl_buf, remaining_off);
		} else
			auth_end_iova = (is_oop ?
				rte_pktmbuf_iova(op->sym->m_dst) :
				rte_pktmbuf_iova(op->sym->m_src)) + auth_ofs +
					auth_len;

		/* Then check if digest-encrypted conditions are met */
		if (((auth_ofs + auth_len < cipher_ofs + cipher_len) &&
				(digest->iova == auth_end_iova)) ||
#ifdef RTE_QAT_OPENSSL
				ctx->bpi_ctx)
#else
				ctx->mb_mgr)
#endif
			max_len = RTE_MAX(max_len, auth_ofs + auth_len +
					ctx->digest_length);
	}

	/* Pass 0 as the mbuf offset; the cipher & auth offsets are conveyed via ofs below */
	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, 0, max_len,
			in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
	if (unlikely(n_src < 0 || n_src > op->sym->m_src->nb_segs)) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return -1;
	}
	in_sgl->num = n_src;

	if (unlikely((op->sym->m_dst != NULL) &&
			(op->sym->m_dst != op->sym->m_src))) {
		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, 0,
				max_len, out_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);

		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
			return -1;
		}
		out_sgl->num = n_dst;
	} else
		out_sgl->num = 0;

	ofs.ofs.cipher.head = cipher_ofs;
	ofs.ofs.cipher.tail = max_len - cipher_ofs - cipher_len;
	ofs.ofs.auth.head = auth_ofs;
	ofs.ofs.auth.tail = max_len - auth_ofs - auth_len;

	return ofs.raw;
}

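/* Convert an AEAD op to vector form: fill the in/out SGLs plus the cipher IV,
 * AAD and digest pointers. Returns 0 on success or UINT64_MAX on error.
 */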
static __rte_always_inline uint64_t
qat_sym_convert_op_to_vec_aead(struct rte_crypto_op *op,
		struct qat_sym_session *ctx,
		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
		struct rte_crypto_va_iova_ptr *cipher_iv,
		struct rte_crypto_va_iova_ptr *auth_iv_or_aad,
		struct rte_crypto_va_iova_ptr *digest)
{
	uint32_t cipher_len = 0, cipher_ofs = 0;
	int32_t n_src = 0;

	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
			ctx->cipher_iv.offset);
	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
			ctx->cipher_iv.offset);
	auth_iv_or_aad->va = (void *)op->sym->aead.aad.data;
	auth_iv_or_aad->iova = op->sym->aead.aad.phys_addr;
	digest->va = (void *)op->sym->aead.digest.data;
	digest->iova = op->sym->aead.digest.phys_addr;

	cipher_len = op->sym->aead.data.length;
	cipher_ofs = op->sym->aead.data.offset;

	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, cipher_ofs, cipher_len,
			in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return UINT64_MAX;
	}
	in_sgl->num = n_src;

	/* Out-Of-Place operation */
	if (unlikely((op->sym->m_dst != NULL) &&
			(op->sym->m_dst != op->sym->m_src))) {
		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, cipher_ofs,
				cipher_len, out_sgl->vec,
				QAT_SYM_SGL_MAX_NUMBER);
		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
			return UINT64_MAX;
		}

		out_sgl->num = n_dst;
	} else
		out_sgl->num = 0;

	return 0;
}

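/* Compact a ZUC-256 IV in place: the trailing 8 IV bytes carry only 6
 * significant bits each and are packed into 6 bytes following byte 16.
 */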
static inline void
zuc256_modify_iv(uint8_t *iv)
{
	uint8_t iv_tmp[8];

	iv_tmp[0] = iv[16];
	/* Pack the last 8 bytes of the IV into 6 bytes,
	 * discarding the 2 MSBs of each byte.
	 */
	iv_tmp[1] = (((iv[17] & 0x3f) << 2) | ((iv[18] >> 4) & 0x3));
	iv_tmp[2] = (((iv[18] & 0xf) << 4) | ((iv[19] >> 2) & 0xf));
	iv_tmp[3] = (((iv[19] & 0x3) << 6) | (iv[20] & 0x3f));

	iv_tmp[4] = (((iv[21] & 0x3f) << 2) | ((iv[22] >> 4) & 0x3));
	iv_tmp[5] = (((iv[22] & 0xf) << 4) | ((iv[23] >> 2) & 0xf));
	iv_tmp[6] = (((iv[23] & 0x3) << 6) | (iv[24] & 0x3f));

	memcpy(iv + 16, iv_tmp, 8);
}

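/* Place the cipher IV into the request: copy it inline into the IV array when
 * it fits, otherwise reference it by IOVA and set the 64-bit IV pointer flag.
 */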
static __rte_always_inline void
qat_set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
		struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
		struct icp_qat_fw_la_bulk_req *qat_req)
{
	/* copy IV into request if it fits */
	if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
		rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
				iv_len);
	else {
		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
				qat_req->comn_hdr.serv_specif_flags,
				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
		cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
	}
}

static __rte_always_inline void
qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
{
	uint32_t i;

	for (i = 0; i < n; i++)
		sta[i] = status;
}

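/* Fill the cipher parameters (IV, offset, length) of a GEN1 cipher-only
 * request. A misaligned AES/3DES CBC length downgrades the job to a NULL
 * operation and flags the cookie with an invalid-arguments status.
 */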
static __rte_always_inline void
enqueue_one_cipher_job_gen1(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *iv,
	union rte_crypto_sym_ofs ofs, uint32_t data_len,
	struct qat_sym_op_cookie *cookie)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param;

	cipher_param = (void *)&req->serv_specif_rqpars;

	/* cipher IV */
	qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
	cipher_param->cipher_offset = ofs.ofs.cipher.head;
	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
			ofs.ofs.cipher.tail;

	if (AES_OR_3DES_MISALIGNED) {
		QAT_LOG(DEBUG,
	  "Input cipher buffer misalignment detected, changing job to a NULL operation");
		struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
		header->service_type = ICP_QAT_FW_COMN_REQ_NULL;
		header->service_cmd_id = ICP_QAT_FW_NULL_REQ_SERV_ID;
		cookie->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
	}
}

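/* Fill the auth parameters (offset, length, digest address and, depending on
 * the algorithm, the auth IV or AAD pointer) of a GEN1 auth-only request.
 */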
static __rte_always_inline void
enqueue_one_auth_job_gen1(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;

	cipher_param = (void *)&req->serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param +
			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);

	auth_param->auth_off = ofs.ofs.auth.head;
	auth_param->auth_len = data_len - ofs.ofs.auth.head -
			ofs.ofs.auth.tail;
	auth_param->auth_res_addr = digest->iova;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
	case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_32:
	case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_64:
	case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_128:
		auth_param->u1.aad_adr = auth_iv->iova;
		break;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
			req->comn_hdr.serv_specif_flags,
				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
		rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
				ctx->auth_iv.length);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SM3:
		if (ctx->auth_mode == ICP_QAT_HW_AUTH_MODE0)
			auth_param->u1.aad_adr = 0;
		else
			auth_param->u1.aad_adr = ctx->prefix_paddr;
		break;
	default:
		break;
	}
}

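/* Fill both the cipher and auth parameters of a GEN1 chained (cipher + hash)
 * request, including the digest-encrypted handling for the wireless and BPI
 * cases. Returns 0 on success, -1 on invalid lengths or misaligned CBC input.
 */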
static __rte_always_inline int
enqueue_one_chain_job_gen1(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_vec *src_vec,
	uint16_t n_src_vecs,
	struct rte_crypto_vec *dst_vec,
	uint16_t n_dst_vecs,
	struct rte_crypto_va_iova_ptr *cipher_iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	union rte_crypto_sym_ofs ofs, uint32_t data_len,
	struct qat_sym_op_cookie *cookie)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct rte_crypto_vec *cvec = n_dst_vecs > 0 ?
			dst_vec : src_vec;
	rte_iova_t auth_iova_end;
	int cipher_len, auth_len;
	int is_sgl = n_src_vecs > 1 || n_dst_vecs > 1;

	cipher_param = (void *)&req->serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param +
			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);

	cipher_len = data_len - ofs.ofs.cipher.head -
			ofs.ofs.cipher.tail;
	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;

	if (unlikely(cipher_len < 0 || auth_len < 0))
		return -1;

	cipher_param->cipher_offset = ofs.ofs.cipher.head;
	cipher_param->cipher_length = cipher_len;
	qat_set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);

	auth_param->auth_off = ofs.ofs.auth.head;
	auth_param->auth_len = auth_len;
	auth_param->auth_res_addr = digest->iova;
	/* Input cipher length alignment requirement for 3DES-CBC and AES-CBC:
	 * the ESP payload size must be 8-byte aligned for 3DES-CBC and
	 * 16-byte aligned for AES-CBC. The alignment should be guaranteed by
	 * the ESP padding field as per RFC 4303. If it is not, the chain job
	 * is passed through as a NULL cipher and NULL auth operation and a
	 * misalignment error is reported.
	 */
	if (AES_OR_3DES_MISALIGNED) {
		QAT_LOG(DEBUG,
	  "Input cipher buffer misalignment detected, changing job to a NULL operation");
		struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
		header->service_type = ICP_QAT_FW_COMN_REQ_NULL;
		header->service_cmd_id = ICP_QAT_FW_NULL_REQ_SERV_ID;
		cookie->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -1;
	}

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
	case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_32:
	case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_64:
	case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_128:
		auth_param->u1.aad_adr = auth_iv->iova;
		break;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		break;
	case ICP_QAT_HW_AUTH_ALGO_SM3:
		if (ctx->auth_mode == ICP_QAT_HW_AUTH_MODE0)
			auth_param->u1.aad_adr = 0;
		else
			auth_param->u1.aad_adr = ctx->prefix_paddr;
		break;
	default:
		break;
	}

	if (unlikely(is_sgl)) {
		/* sgl */
		int i = n_dst_vecs ? n_dst_vecs : n_src_vecs;
		uint32_t remaining_off = data_len - ofs.ofs.auth.tail;

		while (remaining_off >= cvec->len && i >= 1) {
			i--;
			remaining_off -= cvec->len;
			if (i)
				cvec++;
		}

		auth_iova_end = cvec->iova + remaining_off;
	} else
		auth_iova_end = cvec[0].iova + auth_param->auth_off +
			auth_param->auth_len;

	/* Then check if digest-encrypted conditions are met */
	if (((auth_param->auth_off + auth_param->auth_len <
		cipher_param->cipher_offset + cipher_param->cipher_length) &&
			(digest->iova == auth_iova_end)) ||
#ifdef RTE_QAT_OPENSSL
			ctx->bpi_ctx) {
#else
			ctx->mb_mgr) {
#endif
		/* Handle partial digest encryption */
		if (cipher_param->cipher_offset + cipher_param->cipher_length <
			auth_param->auth_off + auth_param->auth_len +
				ctx->digest_length && !is_sgl)
			req->comn_mid.dst_length = req->comn_mid.src_length =
				auth_param->auth_off + auth_param->auth_len +
					ctx->digest_length;
		struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
			ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	}

	return 0;
}

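/* Fill the AEAD parameters of a GEN1 request: GCM IV handling or CCM B0/AAD
 * length encoding, followed by the cipher/auth offsets, lengths and digest
 * address.
 */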
static __rte_always_inline void
enqueue_one_aead_job_gen1(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param =
		(void *)&req->serv_specif_rqpars;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(void *)((uint8_t *)&req->serv_specif_rqpars +
		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
	uint8_t *aad_data;
	uint8_t aad_ccm_real_len;
	uint8_t aad_len_field_sz;
	uint32_t msg_len_be;
	rte_iova_t aad_iova = 0;
	uint8_t q;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
			req->comn_hdr.serv_specif_flags,
				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
		rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
				ctx->cipher_iv.length);
		aad_iova = aad->iova;
		break;
	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
		aad_data = aad->va;
		aad_iova = aad->iova;
		aad_ccm_real_len = 0;
		aad_len_field_sz = 0;
		msg_len_be = rte_bswap32((uint32_t)data_len -
				ofs.ofs.cipher.head);

		if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
			aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
			aad_ccm_real_len = ctx->aad_len -
				ICP_QAT_HW_CCM_AAD_B0_LEN -
				ICP_QAT_HW_CCM_AAD_LEN_INFO;
		} else {
			aad_data = iv->va;
			aad_iova = iv->iova;
		}

		q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
		aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
			aad_len_field_sz, ctx->digest_length, q);
		if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
			memcpy(aad_data + ctx->cipher_iv.length +
				ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
				(uint8_t *)&msg_len_be,
				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
		} else {
			memcpy(aad_data + ctx->cipher_iv.length +
				ICP_QAT_HW_CCM_NONCE_OFFSET,
				(uint8_t *)&msg_len_be +
				(ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
				- q), q);
		}

		if (aad_len_field_sz > 0) {
			*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
				rte_bswap16(aad_ccm_real_len);

			if ((aad_ccm_real_len + aad_len_field_sz)
				% ICP_QAT_HW_CCM_AAD_B0_LEN) {
				uint8_t pad_len = 0;
				uint8_t pad_idx = 0;

				pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
					((aad_ccm_real_len +
					aad_len_field_sz) %
					ICP_QAT_HW_CCM_AAD_B0_LEN);
				pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
					aad_ccm_real_len +
					aad_len_field_sz;
				memset(&aad_data[pad_idx], 0, pad_len);
			}
		}

		rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
			+ ICP_QAT_HW_CCM_NONCE_OFFSET,
			(uint8_t *)iv->va +
			ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
		*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
			q - ICP_QAT_HW_CCM_NONCE_OFFSET;

		rte_memcpy((uint8_t *)aad->va +
				ICP_QAT_HW_CCM_NONCE_OFFSET,
			(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
			ctx->cipher_iv.length);
		break;
	default:
		break;
	}

	cipher_param->cipher_offset = ofs.ofs.cipher.head;
	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
			ofs.ofs.cipher.tail;
	auth_param->auth_off = ofs.ofs.cipher.head;
	auth_param->auth_len = cipher_param->cipher_length;
	auth_param->auth_res_addr = digest->iova;
	auth_param->u1.aad_adr = aad_iova;
}

extern struct rte_cryptodev_ops qat_sym_crypto_ops_gen1;
extern struct rte_cryptodev_ops qat_asym_crypto_ops_gen1;

/* -----------------GEN 1 sym crypto op data path APIs ---------------- */
int
qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
	uint8_t *out_msg, void *op_cookie);

int
qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
		uint8_t *out_msg, void *op_cookie);

int
qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx,
		uint8_t *out_msg, void *op_cookie);

int
qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
		uint8_t *out_msg, void *op_cookie);

/* -----------------GEN 1 sym crypto raw data path APIs ---------------- */
int
qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest __rte_unused,
	struct rte_crypto_va_iova_ptr *aad __rte_unused,
	void *user_data);

uint32_t
qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status);

int
qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv __rte_unused,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	void *user_data);

uint32_t
qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status);

int
qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *cipher_iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	void *user_data);

uint32_t
qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status);

int
qat_sym_dp_enqueue_single_aead_gen1(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	void *user_data);

uint32_t
qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status);

void *
qat_sym_dp_dequeue_single_gen1(void *qp_data, uint8_t *drv_ctx,
	int *dequeue_status, enum rte_crypto_op_status *op_status);

uint32_t
qat_sym_dp_dequeue_burst_gen1(void *qp_data, uint8_t *drv_ctx,
	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
	uint32_t max_nb_to_dequeue,
	rte_cryptodev_raw_post_dequeue_t post_dequeue,
	void **out_user_data, uint8_t is_user_data_array,
	uint32_t *n_success_jobs, int *return_status);

int
qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n);

int
qat_sym_dp_dequeue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n);

int
qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx);

/* -----------------GENx control path APIs ---------------- */
uint64_t
qat_sym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);

int
qat_sym_crypto_set_session_gen1(void *cryptodev, void *session);

int
qat_sym_crypto_set_session_gen4(void *cryptodev, void *session);

void
qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
		uint8_t hash_flag);

int
qat_sym_configure_raw_dp_ctx_gen4(void *_raw_dp_ctx, void *_ctx);

int
qat_asym_crypto_cap_get_gen1(struct qat_cryptodev_private *internals,
			const char *capa_memz_name, const uint16_t slice_map);

uint64_t
qat_asym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);

int
qat_asym_crypto_set_session_gen1(void *cryptodev, void *session);

extern struct rte_security_ops security_qat_ops_gen1;

void *
qat_sym_create_security_gen1(void *cryptodev);

#endif