/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021-2022, 2024 NXP
 */

#include <cryptodev_pmd.h>
#include <bus_fslmc_driver.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>

#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_logs.h"

#include <desc/algo.h>

struct dpaa2_sec_raw_dp_ctx {
	dpaa2_sec_session *session;
	uint32_t tail;
	uint32_t head;
	uint16_t cached_enqueue;
	uint16_t cached_dequeue;
};

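/*
 * Build a compound frame descriptor (FD) for a chained cipher + auth
 * (AUTHENC) raw data-path operation.
 *
 * The first FLE of the allocated table stores the userdata pointer and the
 * session context; fle[1] is the output frame list entry, fle[2] the input
 * one, and the scatter/gather entries follow. The output side covers the
 * cipher range (plus the ICV on encrypt); the input side covers the IV plus
 * the auth range (plus the old ICV on decrypt, for in-SEC verification).
 */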
static int
build_raw_dp_chain_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_sgl *dest_sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	RTE_SET_USED(auth_iv);

	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	int data_len = 0, auth_len = 0, cipher_len = 0;
	unsigned int i = 0;
	uint16_t auth_hdr_len = ofs.ofs.cipher.head -
				ofs.ofs.auth.head;

	uint16_t auth_tail_len;
	uint32_t auth_only_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	uint8_t *iv_ptr = iv->va;

	for (i = 0; i < sgl->num; i++)
		data_len += sgl->vec[i].len;

	cipher_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
	auth_tail_len = auth_len - cipher_len - auth_hdr_len;
	auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
	/* first FLE entry used to store session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(cipher_len + icv_len) :
			cipher_len;

	/* OOP */
	if (dest_sgl) {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova + ofs.ofs.cipher.head);
		sge->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;

		/* o/p segs */
		for (i = 1; i < dest_sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
			sge->length = dest_sgl->vec[i].len;
		}
		sge->length -= ofs.ofs.cipher.tail;
	} else {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova + ofs.ofs.cipher.head);
		sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;

		/* o/p segs */
		for (i = 1; i < sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
			sge->length = sgl->vec[i].len;
		}
		sge->length -= ofs.ofs.cipher.tail;
	}

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
			digest->iova);
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);

	ip_fle->length = (sess->dir == DIR_ENC) ?
			(auth_len + sess->iv.length) :
			(auth_len + sess->iv.length +
			icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova + ofs.ofs.auth.head);
	sge->length = sgl->vec[0].len - ofs.ofs.auth.head;

	for (i = 1; i < sgl->num; i++) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
		sge->length = sgl->vec[i].len;
	}

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, digest->va,
			icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

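/*
 * Build a compound FD for an AEAD (e.g. GCM) raw data-path operation.
 *
 * Layout mirrors the chain case: fle[0] keeps the userdata and session
 * context, fle[1]/fle[2] are the output/input frame list entries. The input
 * side is IV + optional AAD (auth_only_len) + data (plus the old ICV on
 * decrypt); the output side is the AEAD data range (plus the ICV on encrypt).
 */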
static int
build_raw_dp_aead_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_sgl *dest_sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	uint8_t *IV_ptr = iv->va;
	unsigned int i = 0;
	int data_len = 0, aead_len = 0;

	for (i = 0; i < sgl->num; i++)
		data_len += sgl->vec[i].len;

	aead_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(aead_len + icv_len) :
			aead_len;

	/* OOP */
	if (dest_sgl) {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova + ofs.ofs.cipher.head);
		sge->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;

		/* o/p segs */
		for (i = 1; i < dest_sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
			sge->length = dest_sgl->vec[i].len;
		}
	} else {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova + ofs.ofs.cipher.head);
		sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;

		/* o/p segs */
		for (i = 1; i < sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
			sge->length = sgl->vec[i].len;
		}
	}

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, digest->iova);
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
		(aead_len + sess->iv.length + auth_only_len) :
		(aead_len + sess->iv.length + auth_only_len +
		icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;

	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge, auth_iv->iova);
		sge->length = auth_only_len;
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova + ofs.ofs.cipher.head);
	sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;

	/* i/p segs */
	for (i = 1; i < sgl->num; i++) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
		sge->length = sgl->vec[i].len;
	}

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, digest->va, icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

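/*
 * Build a compound FD for an auth-only raw data-path operation.
 *
 * The output FLE points directly at the digest buffer. The input FLE is a
 * scatter/gather list of an optional algorithm IV (converted for SNOW3G F9
 * and ZUC EIA3), the data to authenticate and, for verification (DIR_DEC),
 * a copy of the received digest appended at the end.
 */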
static int
build_raw_dp_auth_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_sgl *dest_sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	RTE_SET_USED(iv);
	RTE_SET_USED(auth_iv);
	RTE_SET_USED(dest_sgl);

	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	int total_len = 0, data_len = 0, data_offset;
	uint8_t *old_digest;
	struct ctxt_priv *priv = sess->ctxt;
	unsigned int i;

	for (i = 0; i < sgl->num; i++)
		total_len += sgl->vec[i].len;

	data_len = total_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
	data_offset = ofs.ofs.auth.head;

	/* For SNOW3G and ZUC, only lengths in bits are supported */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[DESC_INITFINAL].flc;

	/* sg FD */
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle,
			DPAA2_VADDR_TO_IOVA(digest->va));
	op_fle->length = sess->digest_length;

	/* i/p fle */
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = data_len;

	if (sess->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(userdata, uint8_t *,
						sess->iv.offset);

		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sge->length = 12;
		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sge->length = 8;
		} else {
			sge->length = sess->iv.length;
		}
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
		ip_fle->length += sge->length;
		sge++;
	}
	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova + data_offset);

	if (data_len <= (int)(sgl->vec[0].len - data_offset)) {
		sge->length = data_len;
		data_len = 0;
	} else {
		sge->length = sgl->vec[0].len - data_offset;
		for (i = 1; i < sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
			sge->length = sgl->vec[i].len;
		}
	}
	if (sess->dir == DIR_DEC) {
		/* Digest verification case */
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, digest->va,
			sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		ip_fle->length += sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

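/*
 * Build a compound FD for protocol offload (IPsec/PDCP) sessions.
 *
 * The whole source and destination scatter/gather lists are handed to SEC;
 * no offsets apply. For PDCP sessions with HFN override enabled, the
 * per-packet HFN stored after the userdata is programmed into both FLEs and
 * the FD.
 */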
static int
build_raw_dp_proto_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_sgl *dest_sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	RTE_SET_USED(iv);
	RTE_SET_USED(digest);
	RTE_SET_USED(auth_iv);
	RTE_SET_USED(ofs);

	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t in_len = 0, out_len = 0, i;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	DPAA2_SET_FD_IVP(fd);
	DPAA2_SET_FLE_IVP(op_fle);
	DPAA2_SET_FLE_IVP(ip_fle);

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	/* OOP */
	if (dest_sgl) {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
		sge->length = dest_sgl->vec[0].len;
		out_len += sge->length;
		/* o/p segs */
		for (i = 1; i < dest_sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
			sge->length = dest_sgl->vec[i].len;
			out_len += sge->length;
		}
		sge->length = dest_sgl->vec[i - 1].tot_len;

	} else {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
		sge->length = sgl->vec[0].len;
		out_len += sge->length;
		/* o/p segs */
		for (i = 1; i < sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
			sge->length = sgl->vec[i].len;
			out_len += sge->length;
		}
		sge->length = sgl->vec[i - 1].tot_len;
	}
	out_len += sge->length;

	DPAA2_SET_FLE_FIN(sge);
	op_fle->length = out_len;

	sge++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* Configure input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
	sge->length = sgl->vec[0].len;
	in_len += sge->length;
	/* i/p segs */
	for (i = 1; i < sgl->num; i++) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
		sge->length = sgl->vec[i].len;
		in_len += sge->length;
	}

	ip_fle->length = in_len;
	DPAA2_SET_FLE_FIN(sge);

	/* In case of PDCP, per packet HFN is stored in
	 * mbuf priv after sym_op.
	 */
	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
		uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)userdata +
				sess->pdcp.hfn_ovd_offset);
		/* enable HFN override */
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

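/*
 * Build a compound FD for a cipher-only raw data-path operation.
 *
 * The output FLE describes the cipher range of the (destination) buffer;
 * the input FLE is a scatter/gather list of the IV followed by the cipher
 * range of the source buffer.
 */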
static int
build_raw_dp_cipher_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_sgl *dest_sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	RTE_SET_USED(digest);
	RTE_SET_USED(auth_iv);

	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
	int total_len = 0, data_len = 0, data_offset;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	unsigned int i;

	for (i = 0; i < sgl->num; i++)
		total_len += sgl->vec[i].len;

	data_len = total_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
	data_offset = ofs.ofs.cipher.head;

	/* For SNOW3G and ZUC, only lengths in bits are supported */
	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (!fle) {
		DPAA2_SEC_ERR("RAW CIPHER SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	/* first FLE entry used to store userdata and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[0].flc;

	DPAA2_SEC_DP_DEBUG(
		"RAW CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d",
		data_offset,
		data_len,
		sess->iv.length);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
	op_fle->length = data_len;
	DPAA2_SET_FLE_SG_EXT(op_fle);

	/* OOP */
	if (dest_sgl) {
		/* o/p 1st seg */
		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova + data_offset);
		sge->length = dest_sgl->vec[0].len - data_offset;

		/* o/p segs */
		for (i = 1; i < dest_sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
			sge->length = dest_sgl->vec[i].len;
		}
	} else {
		/* o/p 1st seg */
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova + data_offset);
		sge->length = sgl->vec[0].len - data_offset;

		/* o/p segs */
		for (i = 1; i < sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
			sge->length = sgl->vec[i].len;
		}
	}
	DPAA2_SET_FLE_FIN(sge);

	DPAA2_SEC_DP_DEBUG(
		"RAW CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	/* i/p fle */
	sge++;
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = sess->iv.length + data_len;
	DPAA2_SET_FLE_SG_EXT(ip_fle);

	/* i/p IV */
	DPAA2_SET_FLE_ADDR(sge, iv->iova);
	sge->length = sess->iv.length;

	sge++;

	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova + data_offset);
	sge->length = sgl->vec[0].len - data_offset;

	/* i/p segs */
	for (i = 1; i < sgl->num; i++) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
		sge->length = sgl->vec[i].len;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* sg fd */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"RAW CIPHER SG: fdaddr =%" PRIx64 " off =%d, len =%d",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}

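/*
 * Enqueue a vector of raw crypto operations on the queue pair's TX FQ.
 *
 * Each element is converted to a frame descriptor by the session's
 * build_raw_dp_fd() callback and pushed to QBMAN with
 * qbman_swp_enqueue_multiple(), retrying a full EQCR up to
 * DPAA2_MAX_TX_RETRY_COUNT times. Returns the number of descriptors
 * actually enqueued.
 */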
static __rte_always_inline uint32_t
dpaa2_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	RTE_SET_USED(user_data);
	uint32_t loop;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send, retry_count;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp_data;
	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	uint32_t flags[MAX_TX_RING_SLOTS] = {0};

	if (unlikely(vec->num == 0))
		return 0;

	if (sess == NULL) {
		DPAA2_SEC_ERR("sessionless raw crypto not supported");
		return 0;
	}
	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR(
				"Failed to allocate IO portal, tid: %d",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	while (vec->num) {
		frames_to_send = (vec->num > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : vec->num;

		for (loop = 0; loop < frames_to_send; loop++) {
			/* Clear the unused FD fields before sending */
			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
			ret = sess->build_raw_dp_fd(drv_ctx,
						    &vec->src_sgl[loop],
						    &vec->dest_sgl[loop],
						    &vec->iv[loop],
						    &vec->digest[loop],
						    &vec->auth_iv[loop],
						    ofs,
						    user_data[loop],
						    &fd_arr[loop]);
			if (ret) {
				DPAA2_SEC_ERR("error: Improper packet contents"
					      " for crypto operation");
				goto skip_tx;
			}
			status[loop] = 1;
		}

		loop = 0;
		retry_count = 0;
		while (loop < frames_to_send) {
			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
							 &fd_arr[loop],
							 &flags[loop],
							 frames_to_send - loop);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
					num_tx += loop;
					vec->num -= loop;
					goto skip_tx;
				}
			} else {
				loop += ret;
				retry_count = 0;
			}
		}

		num_tx += loop;
		vec->num -= loop;
	}
skip_tx:
	dpaa2_qp->tx_vq.tx_pkts += num_tx;
	dpaa2_qp->tx_vq.err_pkts += vec->num;

	return num_tx;
}

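/*
 * Single-operation enqueue is not implemented for this PMD; applications
 * are expected to use the burst enqueue path above.
 */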
static __rte_always_inline int
dpaa2_sec_raw_enqueue(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data_vec,
	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
	void *user_data)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(data_vec);
	RTE_SET_USED(n_data_vecs);
	RTE_SET_USED(ofs);
	RTE_SET_USED(iv);
	RTE_SET_USED(digest);
	RTE_SET_USED(aad_or_auth_iv);
	RTE_SET_USED(user_data);

	return 0;
}

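/*
 * Recover the userdata pointer stored in the first FLE of the table built
 * at enqueue time, and free that FLE memory. The FD address points at the
 * output FLE (fle + 1), hence the "fle - 1" below.
 */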
static inline void *
sec_fd_to_userdata(const struct qbman_fd *fd)
{
	struct qbman_fle *fle;
	void *userdata;
	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x",
			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
	userdata = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
	/* free the fle memory */
	rte_free((void *)(fle - 1));

	return userdata;
}

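/*
 * Dequeue completed crypto operations from the queue pair's RX FQ using a
 * volatile (pull) dequeue, translate each FD back to its userdata, report
 * per-operation status through post_dequeue() and return the number of
 * frames received.
 */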
static __rte_always_inline uint32_t
dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
	uint32_t max_nb_to_dequeue,
	rte_cryptodev_raw_post_dequeue_t post_dequeue,
	void **out_user_data, uint8_t is_user_data_array,
	uint32_t *n_success, int *dequeue_status)
{
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(get_dequeue_count);

	/* Receive frames for a given device and VQ (queue pair) */
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp_data;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status, is_success = 0;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;
	void *user_data;
	uint32_t nb_ops = max_nb_to_dequeue;

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR(
				"Failed to allocate IO portal, tid: %d",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
	dq_storage = dpaa2_qp->rx_vq.q_storage[0]->dq_storage[0];

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc,
				      (nb_ops > dpaa2_dqrr_size) ?
				      dpaa2_dqrr_size : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				    (uint64_t)DPAA2_VADDR_TO_IOVA(dq_storage),
				    1);

	/* Issue a volatile dequeue command. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_SEC_WARN(
				"SEC VDQ command is not issued : QBMAN busy");
			/* Portal was busy, try again */
			continue;
		}
		break;
	}

	/* Receive the packets until the Last Dequeue entry is found with
	 * respect to the PULL command issued above.
	 */
	while (!is_last) {
		/* Check if the previously issued command has completed.
		 * The SWP also appears to be shared between the Ethernet
		 * driver and the SEC driver.
		 */
		while (!qbman_check_command_complete(dq_storage))
			;

		/* Loop until dq_storage is updated with
		 * a new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		/* Check whether the last pull command has expired,
		 * setting the loop termination condition
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				DPAA2_SEC_DP_DEBUG("No frame is delivered");
				continue;
			}
		}

		fd = qbman_result_DQ_fd(dq_storage);
		user_data = sec_fd_to_userdata(fd);
		if (is_user_data_array)
			out_user_data[num_rx] = user_data;
		else
			out_user_data[0] = user_data;
		if (unlikely(fd->simple.frc)) {
			/* TODO Parse SEC errors */
			DPAA2_SEC_ERR("SEC returned Error - %x",
				      fd->simple.frc);
			is_success = false;
		} else {
			is_success = true;
		}
		post_dequeue(user_data, num_rx, is_success);

		num_rx++;
		dq_storage++;
	} /* End of Packet Rx loop */

	dpaa2_qp->rx_vq.rx_pkts += num_rx;
	*dequeue_status = 1;
	*n_success = num_rx;

	DPAA2_SEC_DP_DEBUG("SEC Received %d Packets", num_rx);
	/* Return the total number of packets received to the DPAA2 app */
	return num_rx;
}

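/*
 * Single-operation dequeue is not implemented for this PMD; applications
 * are expected to use the burst dequeue path above. The *_done() callbacks
 * below are likewise no-ops, since the burst paths submit to and pull from
 * the hardware queues directly.
 */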
static __rte_always_inline void *
dpaa2_sec_raw_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
		enum rte_crypto_op_status *op_status)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(dequeue_status);
	RTE_SET_USED(op_status);

	return NULL;
}

static __rte_always_inline int
dpaa2_sec_raw_enqueue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(n);

	return 0;
}

static __rte_always_inline int
dpaa2_sec_raw_dequeue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(n);

	return 0;
}

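/*
 * Populate the raw data-path context for a queue pair: install the
 * enqueue/dequeue callbacks and pick the FD builder matching the session
 * context type (cipher, auth, cipher+hash, AEAD or protocol offload).
 *
 * A rough application-side sketch using the generic cryptodev raw DP API
 * (buffer setup, the symmetric vector and the post_cb callback are
 * application-provided and only illustrative):
 *
 *	size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *	ctx = rte_zmalloc(NULL, size, 0);
 *	rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
 *			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);
 *	n = rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs, user_data, &st);
 *	n = rte_cryptodev_raw_dequeue_burst(ctx, NULL, nb, post_cb,
 *			out_user_data, 1, &n_success, &dq_status);
 */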
int
dpaa2_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
	enum rte_crypto_op_sess_type sess_type,
	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
{
	dpaa2_sec_session *sess;
	struct dpaa2_sec_raw_dp_ctx *dp_ctx;
	RTE_SET_USED(qp_id);

	if (!is_update) {
		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx));
		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
	}

	if (sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
		sess = SECURITY_GET_SESS_PRIV(session_ctx.sec_sess);
	else if (sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		sess = CRYPTODEV_GET_SYM_SESS_PRIV(session_ctx.crypto_sess);
	else
		return -ENOTSUP;
	raw_dp_ctx->dequeue_burst = dpaa2_sec_raw_dequeue_burst;
	raw_dp_ctx->dequeue = dpaa2_sec_raw_dequeue;
	raw_dp_ctx->dequeue_done = dpaa2_sec_raw_dequeue_done;
	raw_dp_ctx->enqueue_burst = dpaa2_sec_raw_enqueue_burst;
	raw_dp_ctx->enqueue = dpaa2_sec_raw_enqueue;
	raw_dp_ctx->enqueue_done = dpaa2_sec_raw_enqueue_done;

	if (sess->ctxt_type == DPAA2_SEC_CIPHER_HASH)
		sess->build_raw_dp_fd = build_raw_dp_chain_fd;
	else if (sess->ctxt_type == DPAA2_SEC_AEAD)
		sess->build_raw_dp_fd = build_raw_dp_aead_fd;
	else if (sess->ctxt_type == DPAA2_SEC_AUTH)
		sess->build_raw_dp_fd = build_raw_dp_auth_fd;
	else if (sess->ctxt_type == DPAA2_SEC_CIPHER)
		sess->build_raw_dp_fd = build_raw_dp_cipher_fd;
	else if (sess->ctxt_type == DPAA2_SEC_IPSEC ||
		sess->ctxt_type == DPAA2_SEC_PDCP)
		sess->build_raw_dp_fd = build_raw_dp_proto_fd;
	else
		return -ENOTSUP;
	dp_ctx = (struct dpaa2_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
	dp_ctx->session = sess;

	return 0;
}

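/* Size of the driver-private data embedded in struct rte_crypto_raw_dp_ctx */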
int
dpaa2_sec_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
{
	return sizeof(struct dpaa2_sec_raw_dp_ctx);
}