xref: /dpdk/drivers/crypto/nitrox/nitrox_sym_reqmgr.c (revision e99981af34632ecce3bac82d05db97b08308f9b5)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <rte_crypto.h>
#include <cryptodev_pmd.h>
#include <rte_cycles.h>
#include <rte_errno.h>

#include "nitrox_sym_reqmgr.h"
#include "nitrox_logs.h"

#define MAX_SUPPORTED_MBUF_SEGS 16
/* IV + AAD + ORH + CC + DIGEST */
#define ADDITIONAL_SGBUF_CNT 5
#define MAX_SGBUF_CNT (MAX_SUPPORTED_MBUF_SEGS + ADDITIONAL_SGBUF_CNT)
#define MAX_SGCOMP_CNT (RTE_ALIGN_MUL_CEIL(MAX_SGBUF_CNT, 4) / 4)
/* SLC_STORE_INFO */
#define MIN_UDD_LEN 16
/* PKT_IN_HDR + SLC_STORE_INFO */
#define FDATA_SIZE 32
/* Base destination port for the solicited requests */
#define SOLICIT_BASE_DPORT 256
#define PENDING_SIG 0xFFFFFFFFFFFFFFFFUL
#define CMD_TIMEOUT 2
/* For AES-CCM the DPDK API places the actual AAD 18 bytes into the AAD buffer */
#define DPDK_AES_CCM_ADD_OFFSET 18
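
/*
 * Illustrative sanity check (a sketch, not upstream code): with 16 supported
 * mbuf segments plus the 5 extra scatter-gather entries listed above (IV,
 * AAD, ORH, completion code, digest), MAX_SGBUF_CNT works out to 21 and,
 * with 4 entries per SG component, MAX_SGCOMP_CNT to ceil(21 / 4) = 6.
 */
_Static_assert(MAX_SGCOMP_CNT * 4 >= MAX_SGBUF_CNT,
	       "SG components must cover every SG buffer entry");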

struct gphdr {
	uint16_t param0;
	uint16_t param1;
	uint16_t param2;
	uint16_t param3;
};

union pkt_instr_hdr {
	uint64_t value;
	struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		uint64_t raz_48_63 : 16;
		uint64_t g : 1;
		uint64_t gsz : 7;
		uint64_t ihi : 1;
		uint64_t ssz : 7;
		uint64_t raz_30_31 : 2;
		uint64_t fsz : 6;
		uint64_t raz_16_23 : 8;
		uint64_t tlen : 16;
#else
		uint64_t tlen : 16;
		uint64_t raz_16_23 : 8;
		uint64_t fsz : 6;
		uint64_t raz_30_31 : 2;
		uint64_t ssz : 7;
		uint64_t ihi : 1;
		uint64_t gsz : 7;
		uint64_t g : 1;
		uint64_t raz_48_63 : 16;
#endif
	} s;
};

union pkt_hdr {
	uint64_t value[2];
	struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		uint64_t opcode : 8;
		uint64_t arg : 8;
		uint64_t ctxc : 2;
		uint64_t unca : 1;
		uint64_t raz_44 : 1;
		uint64_t info : 3;
		uint64_t destport : 9;
		uint64_t unc : 8;
		uint64_t raz_19_23 : 5;
		uint64_t grp : 3;
		uint64_t raz_15 : 1;
		uint64_t ctxl : 7;
		uint64_t uddl : 8;
#else
		uint64_t uddl : 8;
		uint64_t ctxl : 7;
		uint64_t raz_15 : 1;
		uint64_t grp : 3;
		uint64_t raz_19_23 : 5;
		uint64_t unc : 8;
		uint64_t destport : 9;
		uint64_t info : 3;
		uint64_t raz_44 : 1;
		uint64_t unca : 1;
		uint64_t ctxc : 2;
		uint64_t arg : 8;
		uint64_t opcode : 8;
#endif
		uint64_t ctxp;
	} s;
};

union slc_store_info {
	uint64_t value[2];
	struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		uint64_t raz_39_63 : 25;
		uint64_t ssz : 7;
		uint64_t raz_0_31 : 32;
#else
		uint64_t raz_0_31 : 32;
		uint64_t ssz : 7;
		uint64_t raz_39_63 : 25;
#endif
		uint64_t rptr;
	} s;
};

struct nps_pkt_instr {
	uint64_t dptr0;
	union pkt_instr_hdr ih;
	union pkt_hdr irh;
	union slc_store_info slc;
	uint64_t fdata[2];
};
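
/*
 * Illustrative sanity check (a sketch, not upstream code): the fields above
 * add up to the 64-byte SE instruction documented before create_se_instr():
 * DPTR0 (8) + PKT_IN_INSTR_HDR (8) + PKT_IN_HDR (16) + SLC_STORE_INFO (16) +
 * front data (16) = 64 bytes.
 */
_Static_assert(sizeof(struct nps_pkt_instr) == 64,
	       "SE instruction must be 64 bytes");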

struct resp_hdr {
	uint64_t orh;
	uint64_t completion;
};

struct nitrox_sglist {
	uint16_t len;
	uint16_t raz0;
	uint32_t raz1;
	rte_iova_t iova;
	void *virt;
};

struct nitrox_sgcomp {
	uint16_t len[4];
	uint64_t iova[4];
};

struct nitrox_sgtable {
	uint8_t map_bufs_cnt;
	uint8_t nr_sgcomp;
	uint16_t total_bytes;

	struct nitrox_sglist sglist[MAX_SGBUF_CNT];
	struct nitrox_sgcomp sgcomp[MAX_SGCOMP_CNT];
};

struct iv {
	uint8_t *virt;
	rte_iova_t iova;
	uint16_t len;
};

struct nitrox_softreq {
	struct nitrox_crypto_ctx *ctx;
	struct rte_crypto_op *op;
	struct gphdr gph;
	struct nps_pkt_instr instr;
	struct resp_hdr resp;
	struct nitrox_sgtable in;
	struct nitrox_sgtable out;
	struct iv iv;
	uint64_t timeout;
	rte_iova_t dptr;
	rte_iova_t rptr;
	rte_iova_t iova;
};

static void
softreq_init(struct nitrox_softreq *sr, rte_iova_t iova)
{
	memset(sr, 0, sizeof(*sr));
	sr->iova = iova;
}

/*
 * 64-Byte Instruction Format
 *
 *  ----------------------
 *  |      DPTR0         | 8 bytes
 *  ----------------------
 *  |  PKT_IN_INSTR_HDR  | 8 bytes
 *  ----------------------
 *  |    PKT_IN_HDR      | 16 bytes
 *  ----------------------
 *  |    SLC_INFO        | 16 bytes
 *  ----------------------
 *  |   Front data       | 16 bytes
 *  ----------------------
 */
static void
create_se_instr(struct nitrox_softreq *sr, uint8_t qno)
{
	struct nitrox_crypto_ctx *ctx = sr->ctx;
	rte_iova_t ctx_handle;

	/* fill the packet instruction */
	/* word 0 */
	sr->instr.dptr0 = rte_cpu_to_be_64(sr->dptr);

	/* word 1 */
	sr->instr.ih.value = 0;
	sr->instr.ih.s.g = 1;
	sr->instr.ih.s.gsz = sr->in.map_bufs_cnt;
	sr->instr.ih.s.ssz = sr->out.map_bufs_cnt;
	sr->instr.ih.s.fsz = FDATA_SIZE + sizeof(struct gphdr);
	sr->instr.ih.s.tlen = sr->instr.ih.s.fsz + sr->in.total_bytes;
	sr->instr.ih.value = rte_cpu_to_be_64(sr->instr.ih.value);

	/* word 2 */
	sr->instr.irh.value[0] = 0;
	sr->instr.irh.s.uddl = MIN_UDD_LEN;
	/* context length in 64-bit words */
	sr->instr.irh.s.ctxl = RTE_ALIGN_MUL_CEIL(sizeof(ctx->fctx), 8) / 8;
	/* offset from solicit base port 256 */
	sr->instr.irh.s.destport = SOLICIT_BASE_DPORT + qno;
	/* Invalid context cache */
	sr->instr.irh.s.ctxc = 0x3;
	sr->instr.irh.s.arg = ctx->req_op;
	sr->instr.irh.s.opcode = ctx->opcode;
	sr->instr.irh.value[0] = rte_cpu_to_be_64(sr->instr.irh.value[0]);

	/* word 3 */
	ctx_handle = ctx->iova + offsetof(struct nitrox_crypto_ctx, fctx);
	sr->instr.irh.s.ctxp = rte_cpu_to_be_64(ctx_handle);

	/* word 4 */
	sr->instr.slc.value[0] = 0;
	sr->instr.slc.s.ssz = sr->out.map_bufs_cnt;
	sr->instr.slc.value[0] = rte_cpu_to_be_64(sr->instr.slc.value[0]);

	/* word 5 */
	sr->instr.slc.s.rptr = rte_cpu_to_be_64(sr->rptr);
	/*
	 * No endianness conversion for the front data; it is carried in the
	 * payload as-is. Place the GP header in the front data.
	 */
	memcpy(&sr->instr.fdata[0], &sr->gph, sizeof(sr->instr.fdata[0]));
	sr->instr.fdata[1] = 0;
}

static void
softreq_copy_iv(struct nitrox_softreq *sr, uint8_t salt_size)
{
	uint16_t offset = sr->ctx->iv.offset + salt_size;

	sr->iv.virt = rte_crypto_op_ctod_offset(sr->op, uint8_t *, offset);
	sr->iv.iova = rte_crypto_op_ctophys_offset(sr->op, offset);
	sr->iv.len = sr->ctx->iv.length - salt_size;
}

static void
fill_sglist(struct nitrox_sgtable *sgtbl, uint16_t len, rte_iova_t iova,
	    void *virt)
{
	struct nitrox_sglist *sglist = sgtbl->sglist;
	uint8_t cnt = sgtbl->map_bufs_cnt;

	if (unlikely(!len))
		return;

	sglist[cnt].len = len;
	sglist[cnt].iova = iova;
	sglist[cnt].virt = virt;
	sgtbl->total_bytes += len;
	cnt++;
	sgtbl->map_bufs_cnt = cnt;
}

static int
create_sglist_from_mbuf(struct nitrox_sgtable *sgtbl, struct rte_mbuf *mbuf,
			uint32_t off, int datalen)
{
	struct nitrox_sglist *sglist = sgtbl->sglist;
	uint8_t cnt = sgtbl->map_bufs_cnt;
	struct rte_mbuf *m;
	int mlen;

	if (unlikely(datalen <= 0))
		return 0;

	for (m = mbuf; m && off > rte_pktmbuf_data_len(m); m = m->next)
		off -= rte_pktmbuf_data_len(m);

	if (unlikely(!m))
		return -EIO;

	mlen = rte_pktmbuf_data_len(m) - off;
	if (datalen <= mlen)
		mlen = datalen;
	sglist[cnt].len = mlen;
	sglist[cnt].iova = rte_pktmbuf_iova_offset(m, off);
	sglist[cnt].virt = rte_pktmbuf_mtod_offset(m, uint8_t *, off);
	sgtbl->total_bytes += mlen;
	cnt++;
	datalen -= mlen;
	for (m = m->next; m && datalen; m = m->next) {
		mlen = rte_pktmbuf_data_len(m) < datalen ?
			rte_pktmbuf_data_len(m) : datalen;
		sglist[cnt].len = mlen;
		sglist[cnt].iova = rte_pktmbuf_iova(m);
		sglist[cnt].virt = rte_pktmbuf_mtod(m, uint8_t *);
		sgtbl->total_bytes += mlen;
		cnt++;
		datalen -= mlen;
	}

	RTE_ASSERT(cnt <= MAX_SGBUF_CNT);
	sgtbl->map_bufs_cnt = cnt;
	return 0;
}

static void
create_sgcomp(struct nitrox_sgtable *sgtbl)
{
	int i, j, nr_sgcomp;
	struct nitrox_sgcomp *sgcomp = sgtbl->sgcomp;
	struct nitrox_sglist *sglist = sgtbl->sglist;

	nr_sgcomp = RTE_ALIGN_MUL_CEIL(sgtbl->map_bufs_cnt, 4) / 4;
	sgtbl->nr_sgcomp = nr_sgcomp;
	for (i = 0; i < nr_sgcomp; i++, sgcomp++) {
		for (j = 0; j < 4; j++, sglist++) {
			sgcomp->len[j] = rte_cpu_to_be_16(sglist->len);
			sgcomp->iova[j] = rte_cpu_to_be_64(sglist->iova);
		}
	}
}
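
/*
 * Note (illustrative, derived from the code above): create_sgcomp() packs the
 * flat, CPU-order sglist[] into groups of 4 big-endian (length, address)
 * pairs that the hardware walks via DPTR/RPTR. When map_bufs_cnt is not a
 * multiple of 4, the trailing slots of the last component stay zero-length
 * because the whole softreq, including sglist[], was zeroed in softreq_init();
 * e.g. 6 entries yield 2 components with the last 2 slots unused.
 */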

static int
create_cipher_inbuf(struct nitrox_softreq *sr)
{
	int err;
	struct rte_crypto_op *op = sr->op;

	fill_sglist(&sr->in, sr->iv.len, sr->iv.iova, sr->iv.virt);
	err = create_sglist_from_mbuf(&sr->in, op->sym->m_src,
				      op->sym->cipher.data.offset,
				      op->sym->cipher.data.length);
	if (unlikely(err))
		return err;

	create_sgcomp(&sr->in);
	sr->dptr = sr->iova + offsetof(struct nitrox_softreq, in.sgcomp);

	return 0;
}

static int
create_cipher_outbuf(struct nitrox_softreq *sr)
{
	struct rte_crypto_op *op = sr->op;
	int err, cnt = 0;
	struct rte_mbuf *m_dst = op->sym->m_dst ? op->sym->m_dst :
		op->sym->m_src;

	sr->resp.orh = PENDING_SIG;
	sr->out.sglist[cnt].len = sizeof(sr->resp.orh);
	sr->out.sglist[cnt].iova = sr->iova + offsetof(struct nitrox_softreq,
						       resp.orh);
	sr->out.sglist[cnt].virt = &sr->resp.orh;
	cnt++;

	sr->out.map_bufs_cnt = cnt;
	fill_sglist(&sr->out, sr->iv.len, sr->iv.iova, sr->iv.virt);
	err = create_sglist_from_mbuf(&sr->out, m_dst,
				      op->sym->cipher.data.offset,
				      op->sym->cipher.data.length);
	if (unlikely(err))
		return err;

	cnt = sr->out.map_bufs_cnt;
	sr->resp.completion = PENDING_SIG;
	sr->out.sglist[cnt].len = sizeof(sr->resp.completion);
	sr->out.sglist[cnt].iova = sr->iova + offsetof(struct nitrox_softreq,
						     resp.completion);
	sr->out.sglist[cnt].virt = &sr->resp.completion;
	cnt++;

	RTE_ASSERT(cnt <= MAX_SGBUF_CNT);
	sr->out.map_bufs_cnt = cnt;

	create_sgcomp(&sr->out);
	sr->rptr = sr->iova + offsetof(struct nitrox_softreq, out.sgcomp);

	return 0;
}

static void
create_cipher_gph(uint32_t cryptlen, uint16_t ivlen, struct gphdr *gph)
{
	gph->param0 = rte_cpu_to_be_16(cryptlen);
	gph->param1 = 0;
	gph->param2 = rte_cpu_to_be_16(ivlen);
	gph->param3 = 0;
}

static int
process_cipher_data(struct nitrox_softreq *sr)
{
	struct rte_crypto_op *op = sr->op;
	int err;

	softreq_copy_iv(sr, 0);
	err = create_cipher_inbuf(sr);
	if (unlikely(err))
		return err;

	err = create_cipher_outbuf(sr);
	if (unlikely(err))
		return err;

	create_cipher_gph(op->sym->cipher.data.length, sr->iv.len, &sr->gph);

	return 0;
}

static int
extract_cipher_auth_digest(struct nitrox_softreq *sr,
			   struct nitrox_sglist *digest)
{
	struct rte_crypto_op *op = sr->op;
	struct rte_mbuf *mdst = op->sym->m_dst ? op->sym->m_dst :
					op->sym->m_src;

	if (sr->ctx->req_op == NITROX_OP_DECRYPT &&
	    unlikely(!op->sym->auth.digest.data))
		return -EINVAL;

	digest->len = sr->ctx->digest_length;
	if (op->sym->auth.digest.data) {
		digest->iova = op->sym->auth.digest.phys_addr;
		digest->virt = op->sym->auth.digest.data;
		return 0;
	}

	if (unlikely(rte_pktmbuf_data_len(mdst) < op->sym->auth.data.offset +
	       op->sym->auth.data.length + digest->len))
		return -EINVAL;

	digest->iova = rte_pktmbuf_iova_offset(mdst,
					op->sym->auth.data.offset +
					op->sym->auth.data.length);
	digest->virt = rte_pktmbuf_mtod_offset(mdst, uint8_t *,
					op->sym->auth.data.offset +
					op->sym->auth.data.length);
	return 0;
}
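
/*
 * Note (illustrative, derived from the code above): two digest placements are
 * handled. If the application supplies op->sym->auth.digest.data, that buffer
 * is used directly; otherwise the digest is taken to sit in the destination
 * mbuf right after the authenticated data (and must fit within its first
 * segment). For decryption, the digest to verify must be supplied explicitly.
 */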

static int
create_cipher_auth_sglist(struct nitrox_softreq *sr,
			  struct nitrox_sgtable *sgtbl, struct rte_mbuf *mbuf)
{
	struct rte_crypto_op *op = sr->op;
	int auth_only_len;
	int err;

	fill_sglist(sgtbl, sr->iv.len, sr->iv.iova, sr->iv.virt);
	auth_only_len = op->sym->auth.data.length - op->sym->cipher.data.length;
	if (unlikely(auth_only_len < 0))
		return -EINVAL;

	if (unlikely(
		op->sym->cipher.data.offset + op->sym->cipher.data.length !=
		op->sym->auth.data.offset + op->sym->auth.data.length)) {
		NITROX_LOG_LINE(ERR, "Auth only data after cipher data not supported");
		return -ENOTSUP;
	}

	err = create_sglist_from_mbuf(sgtbl, mbuf, op->sym->auth.data.offset,
				      auth_only_len);
	if (unlikely(err))
		return err;

	err = create_sglist_from_mbuf(sgtbl, mbuf, op->sym->cipher.data.offset,
				      op->sym->cipher.data.length);
	if (unlikely(err))
		return err;

	return 0;
}

static int
create_combined_sglist(struct nitrox_softreq *sr, struct nitrox_sgtable *sgtbl,
		       struct rte_mbuf *mbuf)
{
	struct rte_crypto_op *op = sr->op;
	uint32_t aad_offset = 0;

	if (sr->ctx->aead_algo == RTE_CRYPTO_AEAD_AES_CCM)
		aad_offset = DPDK_AES_CCM_ADD_OFFSET;

	fill_sglist(sgtbl, sr->iv.len, sr->iv.iova, sr->iv.virt);
	fill_sglist(sgtbl, sr->ctx->aad_length,
		    op->sym->aead.aad.phys_addr + aad_offset,
		    op->sym->aead.aad.data + aad_offset);
	return create_sglist_from_mbuf(sgtbl, mbuf, op->sym->cipher.data.offset,
				       op->sym->cipher.data.length);
}
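
/*
 * Illustrative sketch of the CCM AAD layout expected from the application
 * (hypothetical snippet, not part of this driver): the DPDK API reserves the
 * first 18 bytes of sym->aead.aad.data for the CCM B0 block (16 bytes) plus
 * the 2-byte encoding of the AAD length, so the driver reads the real AAD at
 * DPDK_AES_CCM_ADD_OFFSET:
 *
 *	uint8_t *aad = op->sym->aead.aad.data;
 *
 *	memcpy(aad + DPDK_AES_CCM_ADD_OFFSET, user_aad, user_aad_len);
 */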

static int
create_aead_sglist(struct nitrox_softreq *sr, struct nitrox_sgtable *sgtbl,
		   struct rte_mbuf *mbuf)
{
	int err;

	switch (sr->ctx->nitrox_chain) {
	case NITROX_CHAIN_CIPHER_AUTH:
	case NITROX_CHAIN_AUTH_CIPHER:
		err = create_cipher_auth_sglist(sr, sgtbl, mbuf);
		break;
	case NITROX_CHAIN_COMBINED:
		err = create_combined_sglist(sr, sgtbl, mbuf);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int
create_aead_inbuf(struct nitrox_softreq *sr, struct nitrox_sglist *digest)
{
	int err;
	struct nitrox_crypto_ctx *ctx = sr->ctx;

	err = create_aead_sglist(sr, &sr->in, sr->op->sym->m_src);
	if (unlikely(err))
		return err;

	if (ctx->req_op == NITROX_OP_DECRYPT)
		fill_sglist(&sr->in, digest->len, digest->iova, digest->virt);

	create_sgcomp(&sr->in);
	sr->dptr = sr->iova + offsetof(struct nitrox_softreq, in.sgcomp);
	return 0;
}

static int
create_aead_oop_outbuf(struct nitrox_softreq *sr, struct nitrox_sglist *digest)
{
	int err;
	struct nitrox_crypto_ctx *ctx = sr->ctx;

	err = create_aead_sglist(sr, &sr->out, sr->op->sym->m_dst);
	if (unlikely(err))
		return err;

	if (ctx->req_op == NITROX_OP_ENCRYPT)
		fill_sglist(&sr->out, digest->len, digest->iova, digest->virt);

	return 0;
}

static void
create_aead_inplace_outbuf(struct nitrox_softreq *sr,
			   struct nitrox_sglist *digest)
{
	int i, cnt;
	struct nitrox_crypto_ctx *ctx = sr->ctx;

	cnt = sr->out.map_bufs_cnt;
	for (i = 0; i < sr->in.map_bufs_cnt; i++, cnt++) {
		sr->out.sglist[cnt].len = sr->in.sglist[i].len;
		sr->out.sglist[cnt].iova = sr->in.sglist[i].iova;
		sr->out.sglist[cnt].virt = sr->in.sglist[i].virt;
	}

	sr->out.map_bufs_cnt = cnt;
	if (ctx->req_op == NITROX_OP_ENCRYPT) {
		fill_sglist(&sr->out, digest->len, digest->iova,
			    digest->virt);
	} else if (ctx->req_op == NITROX_OP_DECRYPT) {
		sr->out.map_bufs_cnt--;
	}
}

static int
create_aead_outbuf(struct nitrox_softreq *sr, struct nitrox_sglist *digest)
{
	struct rte_crypto_op *op = sr->op;
	int cnt = 0;

	sr->resp.orh = PENDING_SIG;
	sr->out.sglist[cnt].len = sizeof(sr->resp.orh);
	sr->out.sglist[cnt].iova = sr->iova + offsetof(struct nitrox_softreq,
						       resp.orh);
	sr->out.sglist[cnt].virt = &sr->resp.orh;
	cnt++;
	sr->out.map_bufs_cnt = cnt;
	if (op->sym->m_dst) {
		int err;

		err = create_aead_oop_outbuf(sr, digest);
		if (unlikely(err))
			return err;
	} else {
		create_aead_inplace_outbuf(sr, digest);
	}

	cnt = sr->out.map_bufs_cnt;
	sr->resp.completion = PENDING_SIG;
	sr->out.sglist[cnt].len = sizeof(sr->resp.completion);
	sr->out.sglist[cnt].iova = sr->iova + offsetof(struct nitrox_softreq,
						     resp.completion);
	sr->out.sglist[cnt].virt = &sr->resp.completion;
	cnt++;
	RTE_ASSERT(cnt <= MAX_SGBUF_CNT);
	sr->out.map_bufs_cnt = cnt;

	create_sgcomp(&sr->out);
	sr->rptr = sr->iova + offsetof(struct nitrox_softreq, out.sgcomp);
	return 0;
}

static void
create_aead_gph(uint32_t cryptlen, uint16_t ivlen, uint32_t authlen,
		struct gphdr *gph)
{
	int auth_only_len;
	union {
		struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			uint16_t iv_offset : 8;
			uint16_t auth_offset	: 8;
#else
			uint16_t auth_offset	: 8;
			uint16_t iv_offset : 8;
#endif
		};
		uint16_t value;
	} param3;

	gph->param0 = rte_cpu_to_be_16(cryptlen);
	gph->param1 = rte_cpu_to_be_16(authlen);

	auth_only_len = authlen - cryptlen;
	gph->param2 = rte_cpu_to_be_16(ivlen + auth_only_len);

	param3.iv_offset = 0;
	param3.auth_offset = ivlen;
	gph->param3 = rte_cpu_to_be_16(param3.value);
}
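
/*
 * Worked example (illustrative, not upstream code): for a cipher+auth chain
 * with authlen = 64, cryptlen = 48 and a 16-byte IV, create_aead_gph() sets
 * param0 = 48 (cipher length), param1 = 64 (auth length),
 * param2 = 16 + (64 - 48) = 32 (IV plus auth-only bytes) and param3 with
 * auth_offset = 16 and iv_offset = 0, all stored big-endian.
 */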

static int
process_cipher_auth_data(struct nitrox_softreq *sr)
{
	struct rte_crypto_op *op = sr->op;
	int err;
	struct nitrox_sglist digest;

	softreq_copy_iv(sr, 0);
	err = extract_cipher_auth_digest(sr, &digest);
	if (unlikely(err))
		return err;

	err = create_aead_inbuf(sr, &digest);
	if (unlikely(err))
		return err;

	err = create_aead_outbuf(sr, &digest);
	if (unlikely(err))
		return err;

	create_aead_gph(op->sym->cipher.data.length, sr->iv.len,
			op->sym->auth.data.length, &sr->gph);
	return 0;
}

static int
softreq_copy_salt(struct nitrox_softreq *sr)
{
	struct nitrox_crypto_ctx *ctx = sr->ctx;
	uint8_t *addr;

	if (unlikely(ctx->iv.length < AES_GCM_SALT_SIZE)) {
		NITROX_LOG_LINE(ERR, "Invalid IV length %d", ctx->iv.length);
		return -EINVAL;
	}

	addr = rte_crypto_op_ctod_offset(sr->op, uint8_t *, ctx->iv.offset);
	if (!memcmp(ctx->salt, addr, AES_GCM_SALT_SIZE))
		return 0;

	memcpy(ctx->salt, addr, AES_GCM_SALT_SIZE);
	memcpy(ctx->fctx.crypto.iv, addr, AES_GCM_SALT_SIZE);
	return 0;
}
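
/*
 * Note (illustrative, derived from the code): for AES-GCM the first
 * AES_GCM_SALT_SIZE bytes of the per-op IV act as the salt. softreq_copy_salt()
 * caches it in the session and copies it into the crypto context only when it
 * changes, and softreq_copy_iv() is then called with the salt size so only the
 * remaining IV bytes travel with each request.
 */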

static int
extract_combined_digest(struct nitrox_softreq *sr, struct nitrox_sglist *digest)
{
	struct rte_crypto_op *op = sr->op;
	struct rte_mbuf *mdst = op->sym->m_dst ? op->sym->m_dst :
		op->sym->m_src;

	digest->len = sr->ctx->digest_length;
	if (op->sym->aead.digest.data) {
		digest->iova = op->sym->aead.digest.phys_addr;
		digest->virt = op->sym->aead.digest.data;

		return 0;
	}

	if (unlikely(rte_pktmbuf_data_len(mdst) < op->sym->aead.data.offset +
	       op->sym->aead.data.length + digest->len))
		return -EINVAL;

	digest->iova = rte_pktmbuf_iova_offset(mdst,
					op->sym->aead.data.offset +
					op->sym->aead.data.length);
	digest->virt = rte_pktmbuf_mtod_offset(mdst, uint8_t *,
					op->sym->aead.data.offset +
					op->sym->aead.data.length);

	return 0;
}
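
/*
 * Note (illustrative, not upstream documentation): the AES-CCM branch below
 * builds the standard CCM flags byte (NIST SP 800-38C / RFC 3610):
 * bit 6 = Adata (set when AAD is present), bits 3..5 = M' = (tag_len - 2) / 2
 * and bits 0..2 = L' = L - 1, where the driver takes L = 15 - iv.length as the
 * size of the length/counter field. The counter bytes following the nonce are
 * zeroed so a full 16-byte block can be handed to the engine.
 */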

static int
process_combined_data(struct nitrox_softreq *sr)
{
	int err;
	struct nitrox_sglist digest;
	struct rte_crypto_op *op = sr->op;

	if (sr->ctx->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
		err = softreq_copy_salt(sr);
		if (unlikely(err))
			return err;

		softreq_copy_iv(sr, AES_GCM_SALT_SIZE);
	} else if (sr->ctx->aead_algo == RTE_CRYPTO_AEAD_AES_CCM) {
		union {
			uint8_t value;
			struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
				uint8_t rsvd: 1;
				uint8_t adata: 1;
				uint8_t mstar: 3;
				uint8_t lstar: 3;
#else
				uint8_t lstar: 3;
				uint8_t mstar: 3;
				uint8_t adata: 1;
				uint8_t rsvd: 1;
#endif
			};
		} flags;
		uint8_t L;
		uint8_t *iv_addr;

		flags.value = 0;
		flags.rsvd = 0;
		flags.adata = (sr->ctx->aad_length > 0) ? 1 : 0;
		flags.mstar = (sr->ctx->digest_length - 2) / 2;
		L = 15 - sr->ctx->iv.length;
		flags.lstar = L - 1;
		iv_addr = rte_crypto_op_ctod_offset(sr->op, uint8_t *,
						    sr->ctx->iv.offset);
		/* initialize IV flags */
		iv_addr[0] = flags.value;
		/* initialize IV counter to 0 */
		memset(&iv_addr[1] + sr->ctx->iv.length, 0, L);
		sr->iv.virt = rte_crypto_op_ctod_offset(sr->op, uint8_t *,
							sr->ctx->iv.offset);
		sr->iv.iova = rte_crypto_op_ctophys_offset(sr->op,
							   sr->ctx->iv.offset);
		sr->iv.len = 16;
	} else {
		return -EINVAL;
	}

	err = extract_combined_digest(sr, &digest);
	if (unlikely(err))
		return err;

	err = create_aead_inbuf(sr, &digest);
	if (unlikely(err))
		return err;

	err = create_aead_outbuf(sr, &digest);
	if (unlikely(err))
		return err;

	create_aead_gph(op->sym->aead.data.length, sr->iv.len,
			op->sym->aead.data.length + sr->ctx->aad_length,
			&sr->gph);

	return 0;
}

static int
process_softreq(struct nitrox_softreq *sr)
{
	struct nitrox_crypto_ctx *ctx = sr->ctx;
	int err = 0;

	switch (ctx->nitrox_chain) {
	case NITROX_CHAIN_CIPHER_ONLY:
		err = process_cipher_data(sr);
		break;
	case NITROX_CHAIN_CIPHER_AUTH:
	case NITROX_CHAIN_AUTH_CIPHER:
		err = process_cipher_auth_data(sr);
		break;
	case NITROX_CHAIN_COMBINED:
		err = process_combined_data(sr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

int
nitrox_process_se_req(uint16_t qno, struct rte_crypto_op *op,
		      struct nitrox_crypto_ctx *ctx,
		      struct nitrox_softreq *sr)
{
	int err;

	if (unlikely(op->sym->m_src->nb_segs > MAX_SUPPORTED_MBUF_SEGS ||
		     (op->sym->m_dst &&
		      op->sym->m_dst->nb_segs > MAX_SUPPORTED_MBUF_SEGS))) {
		NITROX_LOG_LINE(ERR, "Too many mbuf segments, max supported %d",
			   MAX_SUPPORTED_MBUF_SEGS);
		return -ENOTSUP;
	}

	softreq_init(sr, sr->iova);
	sr->ctx = ctx;
	sr->op = op;
	err = process_softreq(sr);
	if (unlikely(err))
		return err;

	create_se_instr(sr, qno);
	sr->timeout = rte_get_timer_cycles() + CMD_TIMEOUT * rte_get_timer_hz();
	return 0;
}

int
nitrox_check_se_req(struct nitrox_softreq *sr, struct rte_crypto_op **op)
{
	uint64_t cc;
	uint64_t orh;
	int err;

	cc = *(volatile uint64_t *)(&sr->resp.completion);
	orh = *(volatile uint64_t *)(&sr->resp.orh);
	if (cc != PENDING_SIG)
		err = orh & 0xff;
	else if ((orh != PENDING_SIG) && (orh & 0xff))
		err = orh & 0xff;
	else if (rte_get_timer_cycles() >= sr->timeout)
		err = 0xff;
	else
		return -EAGAIN;

	if (unlikely(err))
		NITROX_LOG_LINE(ERR, "Request err 0x%x, orh 0x%"PRIx64, err,
			   sr->resp.orh);

	*op = sr->op;
	return err;
}
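
/*
 * Illustrative sketch (hypothetical, not part of the upstream driver): the
 * real dequeue path lives elsewhere in the PMD; the helper below only shows
 * the -EAGAIN / ORH error-code contract of nitrox_check_se_req(). The
 * function name and the busy-poll loop are assumptions for illustration.
 */
static inline int __rte_unused
nitrox_check_se_req_example(struct nitrox_softreq *sr)
{
	struct rte_crypto_op *op = NULL;
	int err;

	/* -EAGAIN means not yet complete; the built-in timeout bounds the loop */
	do {
		err = nitrox_check_se_req(sr, &op);
	} while (err == -EAGAIN);

	if (op)
		op->status = err ? RTE_CRYPTO_OP_STATUS_ERROR :
			RTE_CRYPTO_OP_STATUS_SUCCESS;

	return err;
}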

void *
nitrox_sym_instr_addr(struct nitrox_softreq *sr)
{
	return &sr->instr;
}

static void
req_pool_obj_init(__rte_unused struct rte_mempool *mp,
		  __rte_unused void *opaque, void *obj,
		  __rte_unused unsigned int obj_idx)
{
	softreq_init(obj, rte_mempool_virt2iova(obj));
}

struct rte_mempool *
nitrox_sym_req_pool_create(struct rte_cryptodev *cdev, uint32_t nobjs,
			   uint16_t qp_id, int socket_id)
{
	char softreq_pool_name[RTE_RING_NAMESIZE];
	struct rte_mempool *mp;

	snprintf(softreq_pool_name, RTE_RING_NAMESIZE, "%s_sr_%d",
		 cdev->data->name, qp_id);
	mp = rte_mempool_create(softreq_pool_name,
				RTE_ALIGN_MUL_CEIL(nobjs, 64),
				sizeof(struct nitrox_softreq),
				64, 0, NULL, NULL, req_pool_obj_init, NULL,
				socket_id, 0);
	if (unlikely(!mp))
		NITROX_LOG_LINE(ERR, "Failed to create req pool, qid %d, err %d",
			   qp_id, rte_errno);

	return mp;
}

void
nitrox_sym_req_pool_free(struct rte_mempool *mp)
{
	rte_mempool_free(mp);
}
915