xref: /dpdk/drivers/crypto/nitrox/nitrox_sym_reqmgr.c (revision f5057be340e44f3edc0fe90fa875eb89a4c49b4f)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_cycles.h>
#include <rte_errno.h>

#include "nitrox_sym_reqmgr.h"
#include "nitrox_logs.h"

#define MAX_SGBUF_CNT 16
#define MAX_SGCOMP_CNT 5
/* SLC_STORE_INFO */
#define MIN_UDD_LEN 16
/* PKT_IN_HDR + SLC_STORE_INFO */
#define FDATA_SIZE 32
/* Base destination port for the solicited requests */
#define SOLICIT_BASE_DPORT 256
#define PENDING_SIG 0xFFFFFFFFFFFFFFFFUL
#define CMD_TIMEOUT 2

struct gphdr {
	uint16_t param0;
	uint16_t param1;
	uint16_t param2;
	uint16_t param3;
};

union pkt_instr_hdr {
	uint64_t value;
	struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		uint64_t raz_48_63 : 16;
		uint64_t g : 1;
		uint64_t gsz : 7;
		uint64_t ihi : 1;
		uint64_t ssz : 7;
		uint64_t raz_30_31 : 2;
		uint64_t fsz : 6;
		uint64_t raz_16_23 : 8;
		uint64_t tlen : 16;
#else
		uint64_t tlen : 16;
		uint64_t raz_16_23 : 8;
		uint64_t fsz : 6;
		uint64_t raz_30_31 : 2;
		uint64_t ssz : 7;
		uint64_t ihi : 1;
		uint64_t gsz : 7;
		uint64_t g : 1;
		uint64_t raz_48_63 : 16;
#endif
	} s;
};

union pkt_hdr {
	uint64_t value[2];
	struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		uint64_t opcode : 8;
		uint64_t arg : 8;
		uint64_t ctxc : 2;
		uint64_t unca : 1;
		uint64_t raz_44 : 1;
		uint64_t info : 3;
		uint64_t destport : 9;
		uint64_t unc : 8;
		uint64_t raz_19_23 : 5;
		uint64_t grp : 3;
		uint64_t raz_15 : 1;
		uint64_t ctxl : 7;
		uint64_t uddl : 8;
#else
		uint64_t uddl : 8;
		uint64_t ctxl : 7;
		uint64_t raz_15 : 1;
		uint64_t grp : 3;
		uint64_t raz_19_23 : 5;
		uint64_t unc : 8;
		uint64_t destport : 9;
		uint64_t info : 3;
		uint64_t raz_44 : 1;
		uint64_t unca : 1;
		uint64_t ctxc : 2;
		uint64_t arg : 8;
		uint64_t opcode : 8;
#endif
		uint64_t ctxp;
	} s;
};

union slc_store_info {
	uint64_t value[2];
	struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		uint64_t raz_39_63 : 25;
		uint64_t ssz : 7;
		uint64_t raz_0_31 : 32;
#else
		uint64_t raz_0_31 : 32;
		uint64_t ssz : 7;
		uint64_t raz_39_63 : 25;
#endif
		uint64_t rptr;
	} s;
};

struct nps_pkt_instr {
	uint64_t dptr0;
	union pkt_instr_hdr ih;
	union pkt_hdr irh;
	union slc_store_info slc;
	uint64_t fdata[2];
};

struct resp_hdr {
	uint64_t orh;
	uint64_t completion;
};

struct nitrox_sglist {
	uint16_t len;
	uint16_t raz0;
	uint32_t raz1;
	rte_iova_t iova;
	void *virt;
};

struct nitrox_sgcomp {
	uint16_t len[4];
	uint64_t iova[4];
};

struct nitrox_sgtable {
	uint8_t map_bufs_cnt;
	uint8_t nr_sgcomp;
	uint16_t total_bytes;

	struct nitrox_sglist sglist[MAX_SGBUF_CNT];
	struct nitrox_sgcomp sgcomp[MAX_SGCOMP_CNT];
};

struct iv {
	uint8_t *virt;
	rte_iova_t iova;
	uint16_t len;
};

struct nitrox_softreq {
	struct nitrox_crypto_ctx *ctx;
	struct rte_crypto_op *op;
	struct gphdr gph;
	struct nps_pkt_instr instr;
	struct resp_hdr resp;
	struct nitrox_sgtable in;
	struct nitrox_sgtable out;
	struct iv iv;
	uint64_t timeout;
	rte_iova_t dptr;
	rte_iova_t rptr;
	rte_iova_t iova;
};

static void
softreq_init(struct nitrox_softreq *sr, rte_iova_t iova)
{
	memset(sr, 0, sizeof(*sr));
	sr->iova = iova;
}

/*
 * 64-Byte Instruction Format
 *
 *  ----------------------
 *  |      DPTR0         | 8 bytes
 *  ----------------------
 *  |  PKT_IN_INSTR_HDR  | 8 bytes
 *  ----------------------
 *  |    PKT_IN_HDR      | 16 bytes
 *  ----------------------
 *  |    SLC_INFO        | 16 bytes
 *  ----------------------
 *  |   Front data       | 16 bytes
 *  ----------------------
 */
static void
create_se_instr(struct nitrox_softreq *sr, uint8_t qno)
{
	struct nitrox_crypto_ctx *ctx = sr->ctx;
	rte_iova_t ctx_handle;

	/* fill the packet instruction */
	/* word 0 */
	sr->instr.dptr0 = rte_cpu_to_be_64(sr->dptr);

	/* word 1 */
	sr->instr.ih.value = 0;
	sr->instr.ih.s.g = 1;
	sr->instr.ih.s.gsz = sr->in.map_bufs_cnt;
	sr->instr.ih.s.ssz = sr->out.map_bufs_cnt;
	sr->instr.ih.s.fsz = FDATA_SIZE + sizeof(struct gphdr);
	sr->instr.ih.s.tlen = sr->instr.ih.s.fsz + sr->in.total_bytes;
	sr->instr.ih.value = rte_cpu_to_be_64(sr->instr.ih.value);

	/* word 2 */
	sr->instr.irh.value[0] = 0;
	sr->instr.irh.s.uddl = MIN_UDD_LEN;
	/* context length in 64-bit words */
	sr->instr.irh.s.ctxl = RTE_ALIGN_MUL_CEIL(sizeof(ctx->fctx), 8) / 8;
	/* destination port = solicit base port (256) + queue number */
	sr->instr.irh.s.destport = SOLICIT_BASE_DPORT + qno;
	/* Invalid context cache */
	sr->instr.irh.s.ctxc = 0x3;
	sr->instr.irh.s.arg = ctx->req_op;
	sr->instr.irh.s.opcode = ctx->opcode;
	sr->instr.irh.value[0] = rte_cpu_to_be_64(sr->instr.irh.value[0]);

	/* word 3 */
	ctx_handle = ctx->iova + offsetof(struct nitrox_crypto_ctx, fctx);
	sr->instr.irh.s.ctxp = rte_cpu_to_be_64(ctx_handle);

	/* word 4 */
	sr->instr.slc.value[0] = 0;
	sr->instr.slc.s.ssz = sr->out.map_bufs_cnt;
	sr->instr.slc.value[0] = rte_cpu_to_be_64(sr->instr.slc.value[0]);

	/* word 5 */
	sr->instr.slc.s.rptr = rte_cpu_to_be_64(sr->rptr);
	/*
	 * No endian conversion for front data; it goes into the payload.
	 * Put the GP header in the front data.
	 */
	memcpy(&sr->instr.fdata[0], &sr->gph, sizeof(sr->instr.fdata[0]));
	sr->instr.fdata[1] = 0;
}

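/* Record the IV location (virtual address, IOVA and length) from the op. */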
static void
softreq_copy_iv(struct nitrox_softreq *sr)
{
	sr->iv.virt = rte_crypto_op_ctod_offset(sr->op, uint8_t *,
						sr->ctx->iv.offset);
	sr->iv.iova = rte_crypto_op_ctophys_offset(sr->op, sr->ctx->iv.offset);
	sr->iv.len = sr->ctx->iv.length;
}

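/*
 * Locate the digest buffer. Use the digest supplied in the op when present
 * (mandatory for verify); otherwise it is expected to follow the
 * authenticated data in the destination mbuf.
 */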
static int
extract_cipher_auth_digest(struct nitrox_softreq *sr,
			   struct nitrox_sglist *digest)
{
	struct rte_crypto_op *op = sr->op;
	struct rte_mbuf *mdst = op->sym->m_dst ? op->sym->m_dst :
					op->sym->m_src;

	if (sr->ctx->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY &&
	    unlikely(!op->sym->auth.digest.data))
		return -EINVAL;

	digest->len = sr->ctx->digest_length;
	if (op->sym->auth.digest.data) {
		digest->iova = op->sym->auth.digest.phys_addr;
		digest->virt = op->sym->auth.digest.data;
		return 0;
	}

	if (unlikely(rte_pktmbuf_data_len(mdst) < op->sym->auth.data.offset +
	       op->sym->auth.data.length + digest->len))
		return -EINVAL;

	digest->iova = rte_pktmbuf_iova_offset(mdst,
					op->sym->auth.data.offset +
					op->sym->auth.data.length);
	digest->virt = rte_pktmbuf_mtod_offset(mdst, uint8_t *,
					op->sym->auth.data.offset +
					op->sym->auth.data.length);
	return 0;
}

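/* Append one buffer to the SG table; zero-length buffers are skipped. */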
static void
fill_sglist(struct nitrox_sgtable *sgtbl, uint16_t len, rte_iova_t iova,
	    void *virt)
{
	struct nitrox_sglist *sglist = sgtbl->sglist;
	uint8_t cnt = sgtbl->map_bufs_cnt;

	if (unlikely(!len))
		return;

	sglist[cnt].len = len;
	sglist[cnt].iova = iova;
	sglist[cnt].virt = virt;
	sgtbl->total_bytes += len;
	cnt++;
	sgtbl->map_bufs_cnt = cnt;
}

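/*
 * Add datalen bytes of an mbuf chain, starting at offset off, to the
 * SG table, one entry per mbuf segment touched.
 */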
static int
create_sglist_from_mbuf(struct nitrox_sgtable *sgtbl, struct rte_mbuf *mbuf,
			uint32_t off, int datalen)
{
	struct nitrox_sglist *sglist = sgtbl->sglist;
	uint8_t cnt = sgtbl->map_bufs_cnt;
	struct rte_mbuf *m;
	int mlen;

	if (unlikely(datalen <= 0))
		return 0;

	for (m = mbuf; m && off > rte_pktmbuf_data_len(m); m = m->next)
		off -= rte_pktmbuf_data_len(m);

	if (unlikely(!m))
		return -EIO;

	mlen = rte_pktmbuf_data_len(m) - off;
	if (datalen <= mlen)
		mlen = datalen;
	sglist[cnt].len = mlen;
	sglist[cnt].iova = rte_pktmbuf_iova_offset(m, off);
	sglist[cnt].virt = rte_pktmbuf_mtod_offset(m, uint8_t *, off);
	sgtbl->total_bytes += mlen;
	cnt++;
	datalen -= mlen;
	for (m = m->next; m && datalen; m = m->next) {
		mlen = rte_pktmbuf_data_len(m) < datalen ?
			rte_pktmbuf_data_len(m) : datalen;
		sglist[cnt].len = mlen;
		sglist[cnt].iova = rte_pktmbuf_iova(m);
		sglist[cnt].virt = rte_pktmbuf_mtod(m, uint8_t *);
		sgtbl->total_bytes += mlen;
		cnt++;
		datalen -= mlen;
	}

	RTE_VERIFY(cnt <= MAX_SGBUF_CNT);
	sgtbl->map_bufs_cnt = cnt;
	return 0;
}

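/*
 * Build the SG list for a cipher+auth op: IV first, then the auth-only
 * region (if any), then the cipher region.
 */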
static int
create_cipher_auth_sglist(struct nitrox_softreq *sr,
			  struct nitrox_sgtable *sgtbl, struct rte_mbuf *mbuf)
{
	struct rte_crypto_op *op = sr->op;
	int auth_only_len;
	int err;

	fill_sglist(sgtbl, sr->iv.len, sr->iv.iova, sr->iv.virt);
	auth_only_len = op->sym->auth.data.length - op->sym->cipher.data.length;
	if (unlikely(auth_only_len < 0))
		return -EINVAL;

	err = create_sglist_from_mbuf(sgtbl, mbuf, op->sym->auth.data.offset,
				      auth_only_len);
	if (unlikely(err))
		return err;

	err = create_sglist_from_mbuf(sgtbl, mbuf, op->sym->cipher.data.offset,
				      op->sym->cipher.data.length);
	if (unlikely(err))
		return err;

	return 0;
}

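/*
 * Pack the SG list into SG components of four entries each, converting
 * lengths and addresses to big endian.
 */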
static void
create_sgcomp(struct nitrox_sgtable *sgtbl)
{
	int i, j, nr_sgcomp;
	struct nitrox_sgcomp *sgcomp = sgtbl->sgcomp;
	struct nitrox_sglist *sglist = sgtbl->sglist;

	nr_sgcomp = RTE_ALIGN_MUL_CEIL(sgtbl->map_bufs_cnt, 4) / 4;
	sgtbl->nr_sgcomp = nr_sgcomp;
	for (i = 0; i < nr_sgcomp; i++, sgcomp++) {
		for (j = 0; j < 4; j++, sglist++) {
			sgcomp->len[j] = rte_cpu_to_be_16(sglist->len);
			sgcomp->iova[j] = rte_cpu_to_be_64(sglist->iova);
		}
	}
}

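/*
 * Build the gather (input) SG components; for verify ops the received
 * digest is part of the input. dptr points to the components inside
 * the softreq.
 */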
static int
create_cipher_auth_inbuf(struct nitrox_softreq *sr,
			 struct nitrox_sglist *digest)
{
	int err;
	struct nitrox_crypto_ctx *ctx = sr->ctx;

	err = create_cipher_auth_sglist(sr, &sr->in, sr->op->sym->m_src);
	if (unlikely(err))
		return err;

	if (ctx->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY)
		fill_sglist(&sr->in, digest->len, digest->iova, digest->virt);

	create_sgcomp(&sr->in);
	sr->dptr = sr->iova + offsetof(struct nitrox_softreq, in.sgcomp);
	return 0;
}

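/*
 * Scatter (output) buffers for out-of-place ops, built from the
 * destination mbuf; the digest is appended when it has to be generated.
 */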
static int
create_cipher_auth_oop_outbuf(struct nitrox_softreq *sr,
			      struct nitrox_sglist *digest)
{
	int err;
	struct nitrox_crypto_ctx *ctx = sr->ctx;

	err = create_cipher_auth_sglist(sr, &sr->out, sr->op->sym->m_dst);
	if (unlikely(err))
		return err;

	if (ctx->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE)
		fill_sglist(&sr->out, digest->len, digest->iova, digest->virt);

	return 0;
}

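/*
 * Scatter buffers for in-place ops: reuse the input SG entries, append
 * the digest for generate, or drop the trailing digest entry for verify.
 */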
static void
create_cipher_auth_inplace_outbuf(struct nitrox_softreq *sr,
				  struct nitrox_sglist *digest)
{
	int i, cnt;
	struct nitrox_crypto_ctx *ctx = sr->ctx;

	cnt = sr->out.map_bufs_cnt;
	for (i = 0; i < sr->in.map_bufs_cnt; i++, cnt++) {
		sr->out.sglist[cnt].len = sr->in.sglist[i].len;
		sr->out.sglist[cnt].iova = sr->in.sglist[i].iova;
		sr->out.sglist[cnt].virt = sr->in.sglist[i].virt;
	}

	sr->out.map_bufs_cnt = cnt;
	if (ctx->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE) {
		fill_sglist(&sr->out, digest->len, digest->iova,
			    digest->virt);
	} else if (ctx->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
		sr->out.map_bufs_cnt--;
	}
}

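/*
 * Assemble the complete scatter list: the response header (ORH) first,
 * then the payload buffers, then the completion word. rptr points to the
 * resulting SG components inside the softreq.
 */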
static int
create_cipher_auth_outbuf(struct nitrox_softreq *sr,
			  struct nitrox_sglist *digest)
{
	struct rte_crypto_op *op = sr->op;
	int cnt = 0;

	sr->resp.orh = PENDING_SIG;
	sr->out.sglist[cnt].len = sizeof(sr->resp.orh);
	sr->out.sglist[cnt].iova = sr->iova + offsetof(struct nitrox_softreq,
						       resp.orh);
	sr->out.sglist[cnt].virt = &sr->resp.orh;
	cnt++;
	sr->out.map_bufs_cnt = cnt;
	if (op->sym->m_dst) {
		int err;

		err = create_cipher_auth_oop_outbuf(sr, digest);
		if (unlikely(err))
			return err;
	} else {
		create_cipher_auth_inplace_outbuf(sr, digest);
	}

	cnt = sr->out.map_bufs_cnt;
	sr->resp.completion = PENDING_SIG;
	sr->out.sglist[cnt].len = sizeof(sr->resp.completion);
	sr->out.sglist[cnt].iova = sr->iova + offsetof(struct nitrox_softreq,
						     resp.completion);
	sr->out.sglist[cnt].virt = &sr->resp.completion;
	cnt++;
	RTE_VERIFY(cnt <= MAX_SGBUF_CNT);
	sr->out.map_bufs_cnt = cnt;

	create_sgcomp(&sr->out);
	sr->rptr = sr->iova + offsetof(struct nitrox_softreq, out.sgcomp);
	return 0;
}

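/*
 * Fill the general purpose header carried in the instruction front data:
 * cipher length, auth length, combined IV plus auth-only length, and the
 * IV/auth offsets.
 */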
static void
create_aead_gph(uint32_t cryptlen, uint16_t ivlen, uint32_t authlen,
		struct gphdr *gph)
{
	int auth_only_len;
	union {
		struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			uint16_t iv_offset : 8;
			uint16_t auth_offset : 8;
#else
			uint16_t auth_offset : 8;
			uint16_t iv_offset : 8;
#endif
		};
		uint16_t value;
	} param3;

	gph->param0 = rte_cpu_to_be_16(cryptlen);
	gph->param1 = rte_cpu_to_be_16(authlen);

	auth_only_len = authlen - cryptlen;
	gph->param2 = rte_cpu_to_be_16(ivlen + auth_only_len);

	param3.iv_offset = 0;
	param3.auth_offset = ivlen;
	gph->param3 = rte_cpu_to_be_16(param3.value);
}

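/*
 * Prepare a cipher+auth request: capture the IV, locate the digest and
 * build the input/output SG tables and the GP header.
 */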
static int
process_cipher_auth_data(struct nitrox_softreq *sr)
{
	struct rte_crypto_op *op = sr->op;
	int err;
	struct nitrox_sglist digest;

	softreq_copy_iv(sr);
	err = extract_cipher_auth_digest(sr, &digest);
	if (unlikely(err))
		return err;

	err = create_cipher_auth_inbuf(sr, &digest);
	if (unlikely(err))
		return err;

	err = create_cipher_auth_outbuf(sr, &digest);
	if (unlikely(err))
		return err;

	create_aead_gph(op->sym->cipher.data.length, sr->iv.len,
			op->sym->auth.data.length, &sr->gph);
	return 0;
}

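/* Dispatch on the configured chain; only cipher+auth chains are supported. */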
static int
process_softreq(struct nitrox_softreq *sr)
{
	struct nitrox_crypto_ctx *ctx = sr->ctx;
	int err = 0;

	switch (ctx->nitrox_chain) {
	case NITROX_CHAIN_CIPHER_AUTH:
	case NITROX_CHAIN_AUTH_CIPHER:
		err = process_cipher_auth_data(sr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

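/*
 * Prepare the 64-byte SE instruction for the given crypto op and arm the
 * command timeout.
 */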
int
nitrox_process_se_req(uint16_t qno, struct rte_crypto_op *op,
		      struct nitrox_crypto_ctx *ctx,
		      struct nitrox_softreq *sr)
{
	int err;

	softreq_init(sr, sr->iova);
	sr->ctx = ctx;
	sr->op = op;
	err = process_softreq(sr);
	if (unlikely(err))
		return err;

	create_se_instr(sr, qno);
	sr->timeout = rte_get_timer_cycles() + CMD_TIMEOUT * rte_get_timer_hz();
	return 0;
}

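/*
 * Poll a submitted request: returns -EAGAIN while it is still pending,
 * 0 on completion, the ORH error code on failure, or 0xff on timeout.
 */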
int
nitrox_check_se_req(struct nitrox_softreq *sr, struct rte_crypto_op **op)
{
	uint64_t cc;
	uint64_t orh;
	int err;

	cc = *(volatile uint64_t *)(&sr->resp.completion);
	orh = *(volatile uint64_t *)(&sr->resp.orh);
	if (cc != PENDING_SIG)
		err = 0;
	else if ((orh != PENDING_SIG) && (orh & 0xff))
		err = orh & 0xff;
	else if (rte_get_timer_cycles() >= sr->timeout)
		err = 0xff;
	else
		return -EAGAIN;

	if (unlikely(err))
		NITROX_LOG(ERR, "Request err 0x%x, orh 0x%"PRIx64"\n", err,
			   sr->resp.orh);

	*op = sr->op;
	return err;
}

void *
nitrox_sym_instr_addr(struct nitrox_softreq *sr)
{
	return &sr->instr;
}

static void
req_pool_obj_init(__rte_unused struct rte_mempool *mp,
		  __rte_unused void *opaque, void *obj,
		  __rte_unused unsigned int obj_idx)
{
	softreq_init(obj, rte_mempool_virt2iova(obj));
}

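/*
 * Create the per queue pair softreq mempool; every object is initialized
 * with its own IOVA via req_pool_obj_init.
 */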
struct rte_mempool *
nitrox_sym_req_pool_create(struct rte_cryptodev *cdev, uint32_t nobjs,
			   uint16_t qp_id, int socket_id)
{
	char softreq_pool_name[RTE_RING_NAMESIZE];
	struct rte_mempool *mp;

	snprintf(softreq_pool_name, RTE_RING_NAMESIZE, "%s_sr_%d",
		 cdev->data->name, qp_id);
	mp = rte_mempool_create(softreq_pool_name,
				RTE_ALIGN_MUL_CEIL(nobjs, 64),
				sizeof(struct nitrox_softreq),
				64, 0, NULL, NULL, req_pool_obj_init, NULL,
				socket_id, 0);
	if (unlikely(!mp))
		NITROX_LOG(ERR, "Failed to create req pool, qid %d, err %d\n",
			   qp_id, rte_errno);

	return mp;
}

void
nitrox_sym_req_pool_free(struct rte_mempool *mp)
{
	rte_mempool_free(mp);
}