/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <rte_crypto.h>
#include <cryptodev_pmd.h>
#include <rte_cycles.h>
#include <rte_errno.h>

#include "nitrox_sym_reqmgr.h"
#include "nitrox_logs.h"

#define MAX_SGBUF_CNT 16
#define MAX_SGCOMP_CNT 5
/* SLC_STORE_INFO */
#define MIN_UDD_LEN 16
/* PKT_IN_HDR + SLC_STORE_INFO */
#define FDATA_SIZE 32
/* Base destination port for the solicited requests */
#define SOLICIT_BASE_DPORT 256
#define PENDING_SIG 0xFFFFFFFFFFFFFFFFUL
#define CMD_TIMEOUT 2

struct gphdr {
	uint16_t param0;
	uint16_t param1;
	uint16_t param2;
	uint16_t param3;
};

union pkt_instr_hdr {
	uint64_t value;
	struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		uint64_t raz_48_63 : 16;
		uint64_t g : 1;
		uint64_t gsz : 7;
		uint64_t ihi : 1;
		uint64_t ssz : 7;
		uint64_t raz_30_31 : 2;
		uint64_t fsz : 6;
		uint64_t raz_16_23 : 8;
		uint64_t tlen : 16;
#else
		uint64_t tlen : 16;
		uint64_t raz_16_23 : 8;
		uint64_t fsz : 6;
		uint64_t raz_30_31 : 2;
		uint64_t ssz : 7;
		uint64_t ihi : 1;
		uint64_t gsz : 7;
		uint64_t g : 1;
		uint64_t raz_48_63 : 16;
#endif
	} s;
};

union pkt_hdr {
	uint64_t value[2];
	struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		uint64_t opcode : 8;
		uint64_t arg : 8;
		uint64_t ctxc : 2;
		uint64_t unca : 1;
		uint64_t raz_44 : 1;
		uint64_t info : 3;
		uint64_t destport : 9;
		uint64_t unc : 8;
		uint64_t raz_19_23 : 5;
		uint64_t grp : 3;
		uint64_t raz_15 : 1;
		uint64_t ctxl : 7;
		uint64_t uddl : 8;
#else
		uint64_t uddl : 8;
		uint64_t ctxl : 7;
		uint64_t raz_15 : 1;
		uint64_t grp : 3;
		uint64_t raz_19_23 : 5;
		uint64_t unc : 8;
		uint64_t destport : 9;
		uint64_t info : 3;
		uint64_t raz_44 : 1;
		uint64_t unca : 1;
		uint64_t ctxc : 2;
		uint64_t arg : 8;
		uint64_t opcode : 8;
#endif
		uint64_t ctxp;
	} s;
};

union slc_store_info {
	uint64_t value[2];
	struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		uint64_t raz_39_63 : 25;
		uint64_t ssz : 7;
		uint64_t raz_0_31 : 32;
#else
		uint64_t raz_0_31 : 32;
		uint64_t ssz : 7;
		uint64_t raz_39_63 : 25;
#endif
		uint64_t rptr;
	} s;
};

struct nps_pkt_instr {
	uint64_t dptr0;
	union pkt_instr_hdr ih;
	union pkt_hdr irh;
	union slc_store_info slc;
	uint64_t fdata[2];
};

struct resp_hdr {
	uint64_t orh;
	uint64_t completion;
};

struct nitrox_sglist {
	uint16_t len;
	uint16_t raz0;
	uint32_t raz1;
	rte_iova_t iova;
	void *virt;
};

struct nitrox_sgcomp {
	uint16_t len[4];
	uint64_t iova[4];
};

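/*
 * Per-direction scatter-gather table. sglist[] is the CPU-side view of each
 * buffer; sgcomp[] is the same list repacked into the big-endian,
 * four-entries-per-component layout referenced by the instruction's
 * DPTR/RPTR.
 */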
struct nitrox_sgtable {
	uint8_t map_bufs_cnt;
	uint8_t nr_sgcomp;
	uint16_t total_bytes;

	struct nitrox_sglist sglist[MAX_SGBUF_CNT];
	struct nitrox_sgcomp sgcomp[MAX_SGCOMP_CNT];
};

struct iv {
	uint8_t *virt;
	rte_iova_t iova;
	uint16_t len;
};

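/*
 * Per-operation request state. The object lives in a mempool and must stay
 * IOVA-addressable as a whole: the instruction's DPTR/RPTR and the response
 * words (orh/completion) are addressed as offsets from sr->iova.
 */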
struct nitrox_softreq {
	struct nitrox_crypto_ctx *ctx;
	struct rte_crypto_op *op;
	struct gphdr gph;
	struct nps_pkt_instr instr;
	struct resp_hdr resp;
	struct nitrox_sgtable in;
	struct nitrox_sgtable out;
	struct iv iv;
	uint64_t timeout;
	rte_iova_t dptr;
	rte_iova_t rptr;
	rte_iova_t iova;
};

static void
softreq_init(struct nitrox_softreq *sr, rte_iova_t iova)
{
	memset(sr, 0, sizeof(*sr));
	sr->iova = iova;
}

/*
 * 64-Byte Instruction Format
 *
 *  ----------------------
 *  |      DPTR0         | 8 bytes
 *  ----------------------
 *  |  PKT_IN_INSTR_HDR  | 8 bytes
 *  ----------------------
 *  |    PKT_IN_HDR      | 16 bytes
 *  ----------------------
 *  |    SLC_INFO        | 16 bytes
 *  ----------------------
 *  |   Front data       | 16 bytes
 *  ----------------------
 */
static void
create_se_instr(struct nitrox_softreq *sr, uint8_t qno)
{
	struct nitrox_crypto_ctx *ctx = sr->ctx;
	rte_iova_t ctx_handle;

	/* fill the packet instruction */
	/* word 0 */
	sr->instr.dptr0 = rte_cpu_to_be_64(sr->dptr);

	/* word 1 */
	sr->instr.ih.value = 0;
	sr->instr.ih.s.g = 1;
	sr->instr.ih.s.gsz = sr->in.map_bufs_cnt;
	sr->instr.ih.s.ssz = sr->out.map_bufs_cnt;
	sr->instr.ih.s.fsz = FDATA_SIZE + sizeof(struct gphdr);
	sr->instr.ih.s.tlen = sr->instr.ih.s.fsz + sr->in.total_bytes;
	sr->instr.ih.value = rte_cpu_to_be_64(sr->instr.ih.value);

	/* word 2 */
	sr->instr.irh.value[0] = 0;
	sr->instr.irh.s.uddl = MIN_UDD_LEN;
	/* context length in 64-bit words */
	sr->instr.irh.s.ctxl = RTE_ALIGN_MUL_CEIL(sizeof(ctx->fctx), 8) / 8;
	/* offset from solicit base port 256 */
	sr->instr.irh.s.destport = SOLICIT_BASE_DPORT + qno;
	/* Invalid context cache */
	sr->instr.irh.s.ctxc = 0x3;
	sr->instr.irh.s.arg = ctx->req_op;
	sr->instr.irh.s.opcode = ctx->opcode;
	sr->instr.irh.value[0] = rte_cpu_to_be_64(sr->instr.irh.value[0]);

	/* word 3 */
	ctx_handle = ctx->iova + offsetof(struct nitrox_crypto_ctx, fctx);
	sr->instr.irh.s.ctxp = rte_cpu_to_be_64(ctx_handle);

	/* word 4 */
	sr->instr.slc.value[0] = 0;
	sr->instr.slc.s.ssz = sr->out.map_bufs_cnt;
	sr->instr.slc.value[0] = rte_cpu_to_be_64(sr->instr.slc.value[0]);

	/* word 5 */
	sr->instr.slc.s.rptr = rte_cpu_to_be_64(sr->rptr);
	/*
	 * No endian conversion for front data; it goes into the payload
	 * as-is. Put the GP header in the front data.
	 */
	memcpy(&sr->instr.fdata[0], &sr->gph, sizeof(sr->instr.fdata[0]));
	sr->instr.fdata[1] = 0;
}

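/*
 * Record where the per-op IV lives inside the crypto op. salt_size skips
 * leading salt bytes (e.g. the AES-GCM salt) that are programmed into the
 * session context instead of being sent with each request.
 */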
static void
softreq_copy_iv(struct nitrox_softreq *sr, uint8_t salt_size)
{
	uint16_t offset = sr->ctx->iv.offset + salt_size;

	sr->iv.virt = rte_crypto_op_ctod_offset(sr->op, uint8_t *, offset);
	sr->iv.iova = rte_crypto_op_ctophys_offset(sr->op, offset);
	sr->iv.len = sr->ctx->iv.length - salt_size;
}

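/* Append one flat buffer to the SG table; zero-length entries are skipped. */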
static void
fill_sglist(struct nitrox_sgtable *sgtbl, uint16_t len, rte_iova_t iova,
	    void *virt)
{
	struct nitrox_sglist *sglist = sgtbl->sglist;
	uint8_t cnt = sgtbl->map_bufs_cnt;

	if (unlikely(!len))
		return;

	sglist[cnt].len = len;
	sglist[cnt].iova = iova;
	sglist[cnt].virt = virt;
	sgtbl->total_bytes += len;
	cnt++;
	sgtbl->map_bufs_cnt = cnt;
}

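/*
 * Walk the mbuf chain starting at byte offset 'off' and add up to 'datalen'
 * bytes worth of segments to the SG table, one entry per mbuf segment.
 */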
static int
create_sglist_from_mbuf(struct nitrox_sgtable *sgtbl, struct rte_mbuf *mbuf,
			uint32_t off, int datalen)
{
	struct nitrox_sglist *sglist = sgtbl->sglist;
	uint8_t cnt = sgtbl->map_bufs_cnt;
	struct rte_mbuf *m;
	int mlen;

	if (unlikely(datalen <= 0))
		return 0;

	for (m = mbuf; m && off > rte_pktmbuf_data_len(m); m = m->next)
		off -= rte_pktmbuf_data_len(m);

	if (unlikely(!m))
		return -EIO;

	mlen = rte_pktmbuf_data_len(m) - off;
	if (datalen <= mlen)
		mlen = datalen;
	sglist[cnt].len = mlen;
	sglist[cnt].iova = rte_pktmbuf_iova_offset(m, off);
	sglist[cnt].virt = rte_pktmbuf_mtod_offset(m, uint8_t *, off);
	sgtbl->total_bytes += mlen;
	cnt++;
	datalen -= mlen;
	for (m = m->next; m && datalen; m = m->next) {
		mlen = rte_pktmbuf_data_len(m) < datalen ?
			rte_pktmbuf_data_len(m) : datalen;
		sglist[cnt].len = mlen;
		sglist[cnt].iova = rte_pktmbuf_iova(m);
		sglist[cnt].virt = rte_pktmbuf_mtod(m, uint8_t *);
		sgtbl->total_bytes += mlen;
		cnt++;
		datalen -= mlen;
	}

	RTE_VERIFY(cnt <= MAX_SGBUF_CNT);
	sgtbl->map_bufs_cnt = cnt;
	return 0;
}

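/*
 * Repack the sglist into the hardware scatter-gather component format:
 * four big-endian (len, iova) pairs per component.
 */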
static void
create_sgcomp(struct nitrox_sgtable *sgtbl)
{
	int i, j, nr_sgcomp;
	struct nitrox_sgcomp *sgcomp = sgtbl->sgcomp;
	struct nitrox_sglist *sglist = sgtbl->sglist;

	nr_sgcomp = RTE_ALIGN_MUL_CEIL(sgtbl->map_bufs_cnt, 4) / 4;
	sgtbl->nr_sgcomp = nr_sgcomp;
	for (i = 0; i < nr_sgcomp; i++, sgcomp++) {
		for (j = 0; j < 4; j++, sglist++) {
			sgcomp->len[j] = rte_cpu_to_be_16(sglist->len);
			sgcomp->iova[j] = rte_cpu_to_be_64(sglist->iova);
		}
	}
}

static int
create_cipher_inbuf(struct nitrox_softreq *sr)
{
	int err;
	struct rte_crypto_op *op = sr->op;

	fill_sglist(&sr->in, sr->iv.len, sr->iv.iova, sr->iv.virt);
	err = create_sglist_from_mbuf(&sr->in, op->sym->m_src,
				      op->sym->cipher.data.offset,
				      op->sym->cipher.data.length);
	if (unlikely(err))
		return err;

	create_sgcomp(&sr->in);
	sr->dptr = sr->iova + offsetof(struct nitrox_softreq, in.sgcomp);

	return 0;
}

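/*
 * Output scatter list layout: the 8-byte ORH first, then IV and cipher data,
 * then the 8-byte completion word. ORH and completion are seeded with
 * PENDING_SIG so nitrox_check_se_req() can detect when they get overwritten.
 */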
static int
create_cipher_outbuf(struct nitrox_softreq *sr)
{
	struct rte_crypto_op *op = sr->op;
	int err, cnt = 0;
	struct rte_mbuf *m_dst = op->sym->m_dst ? op->sym->m_dst :
		op->sym->m_src;

	sr->resp.orh = PENDING_SIG;
	sr->out.sglist[cnt].len = sizeof(sr->resp.orh);
	sr->out.sglist[cnt].iova = sr->iova + offsetof(struct nitrox_softreq,
						       resp.orh);
	sr->out.sglist[cnt].virt = &sr->resp.orh;
	cnt++;

	sr->out.map_bufs_cnt = cnt;
	fill_sglist(&sr->out, sr->iv.len, sr->iv.iova, sr->iv.virt);
	err = create_sglist_from_mbuf(&sr->out, m_dst,
				      op->sym->cipher.data.offset,
				      op->sym->cipher.data.length);
	if (unlikely(err))
		return err;

	cnt = sr->out.map_bufs_cnt;
	sr->resp.completion = PENDING_SIG;
	sr->out.sglist[cnt].len = sizeof(sr->resp.completion);
	sr->out.sglist[cnt].iova = sr->iova + offsetof(struct nitrox_softreq,
						     resp.completion);
	sr->out.sglist[cnt].virt = &sr->resp.completion;
	cnt++;

	RTE_VERIFY(cnt <= MAX_SGBUF_CNT);
	sr->out.map_bufs_cnt = cnt;

	create_sgcomp(&sr->out);
	sr->rptr = sr->iova + offsetof(struct nitrox_softreq, out.sgcomp);

	return 0;
}

static void
create_cipher_gph(uint32_t cryptlen, uint16_t ivlen, struct gphdr *gph)
{
	gph->param0 = rte_cpu_to_be_16(cryptlen);
	gph->param1 = 0;
	gph->param2 = rte_cpu_to_be_16(ivlen);
	gph->param3 = 0;
}

static int
process_cipher_data(struct nitrox_softreq *sr)
{
	struct rte_crypto_op *op = sr->op;
	int err;

	softreq_copy_iv(sr, 0);
	err = create_cipher_inbuf(sr);
	if (unlikely(err))
		return err;

	err = create_cipher_outbuf(sr);
	if (unlikely(err))
		return err;

	create_cipher_gph(op->sym->cipher.data.length, sr->iv.len, &sr->gph);

	return 0;
}

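/*
 * Locate the digest buffer for cipher+auth chains. For verify (decrypt) the
 * application must supply the digest; for generate it may instead follow the
 * authenticated data in the destination mbuf.
 */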
static int
extract_cipher_auth_digest(struct nitrox_softreq *sr,
			   struct nitrox_sglist *digest)
{
	struct rte_crypto_op *op = sr->op;
	struct rte_mbuf *mdst = op->sym->m_dst ? op->sym->m_dst :
					op->sym->m_src;

	if (sr->ctx->req_op == NITROX_OP_DECRYPT &&
	    unlikely(!op->sym->auth.digest.data))
		return -EINVAL;

	digest->len = sr->ctx->digest_length;
	if (op->sym->auth.digest.data) {
		digest->iova = op->sym->auth.digest.phys_addr;
		digest->virt = op->sym->auth.digest.data;
		return 0;
	}

	if (unlikely(rte_pktmbuf_data_len(mdst) < op->sym->auth.data.offset +
	       op->sym->auth.data.length + digest->len))
		return -EINVAL;

	digest->iova = rte_pktmbuf_iova_offset(mdst,
					op->sym->auth.data.offset +
					op->sym->auth.data.length);
	digest->virt = rte_pktmbuf_mtod_offset(mdst, uint8_t *,
					op->sym->auth.data.offset +
					op->sym->auth.data.length);
	return 0;
}

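/*
 * Build the SG list for a cipher+auth chain: IV, then the auth-only region
 * that precedes the cipher region, then the cipher region itself. Auth-only
 * data placed after the cipher data is rejected as unsupported.
 */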
static int
create_cipher_auth_sglist(struct nitrox_softreq *sr,
			  struct nitrox_sgtable *sgtbl, struct rte_mbuf *mbuf)
{
	struct rte_crypto_op *op = sr->op;
	int auth_only_len;
	int err;

	fill_sglist(sgtbl, sr->iv.len, sr->iv.iova, sr->iv.virt);
	auth_only_len = op->sym->auth.data.length - op->sym->cipher.data.length;
	if (unlikely(auth_only_len < 0))
		return -EINVAL;

	if (unlikely(
		op->sym->cipher.data.offset + op->sym->cipher.data.length !=
		op->sym->auth.data.offset + op->sym->auth.data.length)) {
		NITROX_LOG(ERR, "Auth only data after cipher data not supported\n");
		return -ENOTSUP;
	}

	err = create_sglist_from_mbuf(sgtbl, mbuf, op->sym->auth.data.offset,
				      auth_only_len);
	if (unlikely(err))
		return err;

	err = create_sglist_from_mbuf(sgtbl, mbuf, op->sym->cipher.data.offset,
				      op->sym->cipher.data.length);
	if (unlikely(err))
		return err;

	return 0;
}

static int
create_combined_sglist(struct nitrox_softreq *sr, struct nitrox_sgtable *sgtbl,
		       struct rte_mbuf *mbuf)
{
	struct rte_crypto_op *op = sr->op;

	fill_sglist(sgtbl, sr->iv.len, sr->iv.iova, sr->iv.virt);
	fill_sglist(sgtbl, sr->ctx->aad_length, op->sym->aead.aad.phys_addr,
		    op->sym->aead.aad.data);
	return create_sglist_from_mbuf(sgtbl, mbuf, op->sym->cipher.data.offset,
				       op->sym->cipher.data.length);
}

static int
create_aead_sglist(struct nitrox_softreq *sr, struct nitrox_sgtable *sgtbl,
		   struct rte_mbuf *mbuf)
{
	int err;

	switch (sr->ctx->nitrox_chain) {
	case NITROX_CHAIN_CIPHER_AUTH:
	case NITROX_CHAIN_AUTH_CIPHER:
		err = create_cipher_auth_sglist(sr, sgtbl, mbuf);
		break;
	case NITROX_CHAIN_COMBINED:
		err = create_combined_sglist(sr, sgtbl, mbuf);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int
create_aead_inbuf(struct nitrox_softreq *sr, struct nitrox_sglist *digest)
{
	int err;
	struct nitrox_crypto_ctx *ctx = sr->ctx;

	err = create_aead_sglist(sr, &sr->in, sr->op->sym->m_src);
	if (unlikely(err))
		return err;

	if (ctx->req_op == NITROX_OP_DECRYPT)
		fill_sglist(&sr->in, digest->len, digest->iova, digest->virt);

	create_sgcomp(&sr->in);
	sr->dptr = sr->iova + offsetof(struct nitrox_softreq, in.sgcomp);
	return 0;
}

static int
create_aead_oop_outbuf(struct nitrox_softreq *sr, struct nitrox_sglist *digest)
{
	int err;
	struct nitrox_crypto_ctx *ctx = sr->ctx;

	err = create_aead_sglist(sr, &sr->out, sr->op->sym->m_dst);
	if (unlikely(err))
		return err;

	if (ctx->req_op == NITROX_OP_ENCRYPT)
		fill_sglist(&sr->out, digest->len, digest->iova, digest->virt);

	return 0;
}

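/*
 * In-place output: mirror the input SG entries after the leading ORH entry
 * already added by the caller. For encrypt, append the digest; for decrypt,
 * drop the trailing digest entry that was part of the input.
 */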
static void
create_aead_inplace_outbuf(struct nitrox_softreq *sr,
			   struct nitrox_sglist *digest)
{
	int i, cnt;
	struct nitrox_crypto_ctx *ctx = sr->ctx;

	cnt = sr->out.map_bufs_cnt;
	for (i = 0; i < sr->in.map_bufs_cnt; i++, cnt++) {
		sr->out.sglist[cnt].len = sr->in.sglist[i].len;
		sr->out.sglist[cnt].iova = sr->in.sglist[i].iova;
		sr->out.sglist[cnt].virt = sr->in.sglist[i].virt;
	}

	sr->out.map_bufs_cnt = cnt;
	if (ctx->req_op == NITROX_OP_ENCRYPT) {
		fill_sglist(&sr->out, digest->len, digest->iova,
			    digest->virt);
	} else if (ctx->req_op == NITROX_OP_DECRYPT) {
		sr->out.map_bufs_cnt--;
	}
}

static int
create_aead_outbuf(struct nitrox_softreq *sr, struct nitrox_sglist *digest)
{
	struct rte_crypto_op *op = sr->op;
	int cnt = 0;

	sr->resp.orh = PENDING_SIG;
	sr->out.sglist[cnt].len = sizeof(sr->resp.orh);
	sr->out.sglist[cnt].iova = sr->iova + offsetof(struct nitrox_softreq,
						       resp.orh);
	sr->out.sglist[cnt].virt = &sr->resp.orh;
	cnt++;
	sr->out.map_bufs_cnt = cnt;
	if (op->sym->m_dst) {
		int err;

		err = create_aead_oop_outbuf(sr, digest);
		if (unlikely(err))
			return err;
	} else {
		create_aead_inplace_outbuf(sr, digest);
	}

	cnt = sr->out.map_bufs_cnt;
	sr->resp.completion = PENDING_SIG;
	sr->out.sglist[cnt].len = sizeof(sr->resp.completion);
	sr->out.sglist[cnt].iova = sr->iova + offsetof(struct nitrox_softreq,
						     resp.completion);
	sr->out.sglist[cnt].virt = &sr->resp.completion;
	cnt++;
	RTE_VERIFY(cnt <= MAX_SGBUF_CNT);
	sr->out.map_bufs_cnt = cnt;

	create_sgcomp(&sr->out);
	sr->rptr = sr->iova + offsetof(struct nitrox_softreq, out.sgcomp);
	return 0;
}

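/*
 * Fill the general-purpose header carried in front data: param0 = cipher
 * length, param1 = auth length, param2 = cipher data offset (IV length plus
 * auth-only length), param3 = IV/auth offsets packed into one 16-bit word.
 * All fields are big endian.
 */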
static void
create_aead_gph(uint32_t cryptlen, uint16_t ivlen, uint32_t authlen,
		struct gphdr *gph)
{
	int auth_only_len;
	union {
		struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			uint16_t iv_offset : 8;
			uint16_t auth_offset	: 8;
#else
			uint16_t auth_offset	: 8;
			uint16_t iv_offset : 8;
#endif
		};
		uint16_t value;
	} param3;

	gph->param0 = rte_cpu_to_be_16(cryptlen);
	gph->param1 = rte_cpu_to_be_16(authlen);

	auth_only_len = authlen - cryptlen;
	gph->param2 = rte_cpu_to_be_16(ivlen + auth_only_len);

	param3.iv_offset = 0;
	param3.auth_offset = ivlen;
	gph->param3 = rte_cpu_to_be_16(param3.value);
}

static int
process_cipher_auth_data(struct nitrox_softreq *sr)
{
	struct rte_crypto_op *op = sr->op;
	int err;
	struct nitrox_sglist digest;

	softreq_copy_iv(sr, 0);
	err = extract_cipher_auth_digest(sr, &digest);
	if (unlikely(err))
		return err;

	err = create_aead_inbuf(sr, &digest);
	if (unlikely(err))
		return err;

	err = create_aead_outbuf(sr, &digest);
	if (unlikely(err))
		return err;

	create_aead_gph(op->sym->cipher.data.length, sr->iv.len,
			op->sym->auth.data.length, &sr->gph);
	return 0;
}

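/*
 * For AES-GCM the first AES_GCM_SALT_SIZE bytes of the op IV are treated as
 * salt and cached in the session context (fctx); they are copied only when
 * they change, so back-to-back ops with the same salt skip the memcpy.
 */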
static int
softreq_copy_salt(struct nitrox_softreq *sr)
{
	struct nitrox_crypto_ctx *ctx = sr->ctx;
	uint8_t *addr;

	if (unlikely(ctx->iv.length < AES_GCM_SALT_SIZE)) {
		NITROX_LOG(ERR, "Invalid IV length %d\n", ctx->iv.length);
		return -EINVAL;
	}

	addr = rte_crypto_op_ctod_offset(sr->op, uint8_t *, ctx->iv.offset);
	if (!memcmp(ctx->salt, addr, AES_GCM_SALT_SIZE))
		return 0;

	memcpy(ctx->salt, addr, AES_GCM_SALT_SIZE);
	memcpy(ctx->fctx.crypto.iv, addr, AES_GCM_SALT_SIZE);
	return 0;
}

static int
extract_combined_digest(struct nitrox_softreq *sr, struct nitrox_sglist *digest)
{
	struct rte_crypto_op *op = sr->op;
	struct rte_mbuf *mdst = op->sym->m_dst ? op->sym->m_dst :
		op->sym->m_src;

	digest->len = sr->ctx->digest_length;
	if (op->sym->aead.digest.data) {
		digest->iova = op->sym->aead.digest.phys_addr;
		digest->virt = op->sym->aead.digest.data;

		return 0;
	}

	if (unlikely(rte_pktmbuf_data_len(mdst) < op->sym->aead.data.offset +
	       op->sym->aead.data.length + digest->len))
		return -EINVAL;

	digest->iova = rte_pktmbuf_iova_offset(mdst,
					op->sym->aead.data.offset +
					op->sym->aead.data.length);
	digest->virt = rte_pktmbuf_mtod_offset(mdst, uint8_t *,
					op->sym->aead.data.offset +
					op->sym->aead.data.length);

	return 0;
}

static int
process_combined_data(struct nitrox_softreq *sr)
{
	int err;
	struct nitrox_sglist digest;
	struct rte_crypto_op *op = sr->op;

	err = softreq_copy_salt(sr);
	if (unlikely(err))
		return err;

	softreq_copy_iv(sr, AES_GCM_SALT_SIZE);
	err = extract_combined_digest(sr, &digest);
	if (unlikely(err))
		return err;

	err = create_aead_inbuf(sr, &digest);
	if (unlikely(err))
		return err;

	err = create_aead_outbuf(sr, &digest);
	if (unlikely(err))
		return err;

	create_aead_gph(op->sym->aead.data.length, sr->iv.len,
			op->sym->aead.data.length + sr->ctx->aad_length,
			&sr->gph);

	return 0;
}

static int
process_softreq(struct nitrox_softreq *sr)
{
	struct nitrox_crypto_ctx *ctx = sr->ctx;
	int err = 0;

	switch (ctx->nitrox_chain) {
	case NITROX_CHAIN_CIPHER_ONLY:
		err = process_cipher_data(sr);
		break;
	case NITROX_CHAIN_CIPHER_AUTH:
	case NITROX_CHAIN_AUTH_CIPHER:
		err = process_cipher_auth_data(sr);
		break;
	case NITROX_CHAIN_COMBINED:
		err = process_combined_data(sr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

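/*
 * Prepare a softreq for one crypto op and build its 64-byte SE instruction.
 * Illustrative enqueue-side flow only; the queue-pair fields and helpers
 * below are hypothetical and merely sketch how this API is meant to be used:
 *
 *	struct nitrox_softreq *sr;
 *
 *	if (rte_mempool_get(qp->sr_mp, (void **)&sr))
 *		return 0;
 *	if (nitrox_process_se_req(qp->qno, op, ctx, sr)) {
 *		rte_mempool_put(qp->sr_mp, sr);
 *		return 0;
 *	}
 *	copy nitrox_sym_instr_addr(sr) into the command queue, ring the
 *	doorbell, then later poll the request with nitrox_check_se_req().
 */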
int
nitrox_process_se_req(uint16_t qno, struct rte_crypto_op *op,
		      struct nitrox_crypto_ctx *ctx,
		      struct nitrox_softreq *sr)
{
	int err;

	softreq_init(sr, sr->iova);
	sr->ctx = ctx;
	sr->op = op;
	err = process_softreq(sr);
	if (unlikely(err))
		return err;

	create_se_instr(sr, qno);
	sr->timeout = rte_get_timer_cycles() + CMD_TIMEOUT * rte_get_timer_hz();
	return 0;
}

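/*
 * Poll a previously submitted request. Returns -EAGAIN while the completion
 * word still holds PENDING_SIG and the timeout has not expired; otherwise
 * hands back the crypto op and returns the low byte of the ORH (0 on
 * success) or 0xff on timeout.
 */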
int
nitrox_check_se_req(struct nitrox_softreq *sr, struct rte_crypto_op **op)
{
	uint64_t cc;
	uint64_t orh;
	int err;

	cc = *(volatile uint64_t *)(&sr->resp.completion);
	orh = *(volatile uint64_t *)(&sr->resp.orh);
	if (cc != PENDING_SIG)
		err = orh & 0xff;
	else if ((orh != PENDING_SIG) && (orh & 0xff))
		err = orh & 0xff;
	else if (rte_get_timer_cycles() >= sr->timeout)
		err = 0xff;
	else
		return -EAGAIN;

	if (unlikely(err))
		NITROX_LOG(ERR, "Request err 0x%x, orh 0x%"PRIx64"\n", err,
			   sr->resp.orh);

	*op = sr->op;
	return err;
}

void *
nitrox_sym_instr_addr(struct nitrox_softreq *sr)
{
	return &sr->instr;
}

static void
req_pool_obj_init(__rte_unused struct rte_mempool *mp,
		  __rte_unused void *opaque, void *obj,
		  __rte_unused unsigned int obj_idx)
{
	softreq_init(obj, rte_mempool_virt2iova(obj));
}

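/*
 * Create the per-queue-pair softreq mempool. The object count is rounded up
 * to a multiple of 64 and each object is seeded with its IOVA at pool
 * creation time via req_pool_obj_init().
 */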
struct rte_mempool *
nitrox_sym_req_pool_create(struct rte_cryptodev *cdev, uint32_t nobjs,
			   uint16_t qp_id, int socket_id)
{
	char softreq_pool_name[RTE_RING_NAMESIZE];
	struct rte_mempool *mp;

	snprintf(softreq_pool_name, RTE_RING_NAMESIZE, "%s_sr_%d",
		 cdev->data->name, qp_id);
	mp = rte_mempool_create(softreq_pool_name,
				RTE_ALIGN_MUL_CEIL(nobjs, 64),
				sizeof(struct nitrox_softreq),
				64, 0, NULL, NULL, req_pool_obj_init, NULL,
				socket_id, 0);
	if (unlikely(!mp))
		NITROX_LOG(ERR, "Failed to create req pool, qid %d, err %d\n",
			   qp_id, rte_errno);

	return mp;
}

void
nitrox_sym_req_pool_free(struct rte_mempool *mp)
{
	rte_mempool_free(mp);
}
855