xref: /dpdk/drivers/crypto/ionic/ionic_crypto_ops.c (revision 54d56aba175f19918975bf5e88f5e39aca41c9bc)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021-2024 Advanced Micro Devices, Inc.
 */

#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_mempool.h>

#include "ionic_crypto.h"

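/* Device lifecycle ops are thin wrappers around the common iocpt_* helpers */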
static int
iocpt_op_config(struct rte_cryptodev *cdev,
		struct rte_cryptodev_config *config __rte_unused)
{
	struct iocpt_dev *dev = cdev->data->dev_private;

	iocpt_configure(dev);

	return 0;
}

static int
iocpt_op_start(struct rte_cryptodev *cdev)
{
	struct iocpt_dev *dev = cdev->data->dev_private;

	return iocpt_start(dev);
}

static void
iocpt_op_stop(struct rte_cryptodev *cdev)
{
	struct iocpt_dev *dev = cdev->data->dev_private;

	iocpt_stop(dev);
}

static int
iocpt_op_close(struct rte_cryptodev *cdev)
{
	struct iocpt_dev *dev = cdev->data->dev_private;

	iocpt_deinit(dev);

	return 0;
}

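/* Report device info: capabilities, feature flags, and queue/session limits */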
static void
iocpt_op_info_get(struct rte_cryptodev *cdev, struct rte_cryptodev_info *info)
{
	struct iocpt_dev *dev = cdev->data->dev_private;

	if (info == NULL)
		return;

	info->max_nb_queue_pairs = dev->max_qps;
	info->feature_flags = dev->features;
	info->capabilities = iocpt_get_caps(info->feature_flags);
	/* Reserve one session for watchdog */
	info->sym.max_nb_sessions = dev->max_sessions - 1;
	info->driver_id = dev->driver_id;
	info->min_mbuf_headroom_req = 0;
	info->min_mbuf_tailroom_req = 0;
}

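/* Free a queue pair and clear its slot; the queue must already be deinited */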
static int
iocpt_op_queue_release(struct rte_cryptodev *cdev, uint16_t queue_id)
{
	struct iocpt_crypto_q *cptq = cdev->data->queue_pairs[queue_id];

	IOCPT_PRINT(DEBUG, "queue_id %u", queue_id);

	assert(!(cptq->flags & IOCPT_Q_F_INITED));

	iocpt_cryptoq_free(cptq);

	cdev->data->queue_pairs[queue_id] = NULL;

	return 0;
}

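/* Validate the descriptor count and allocate a queue pair, replacing any existing one */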
static int
iocpt_op_queue_setup(struct rte_cryptodev *cdev, uint16_t queue_id,
		const struct rte_cryptodev_qp_conf *qp_conf,
		int socket_id)
{
	struct iocpt_dev *dev = cdev->data->dev_private;
	int err;

	if (cdev->data->queue_pairs[queue_id] != NULL)
		iocpt_op_queue_release(cdev, queue_id);

	if (qp_conf->nb_descriptors < (1 << IOCPT_QSIZE_MIN_LG2) ||
	    qp_conf->nb_descriptors > (1 << IOCPT_QSIZE_MAX_LG2)) {
		IOCPT_PRINT(ERR, "invalid nb_descriptors %u, use range %u..%u",
			qp_conf->nb_descriptors,
			1 << IOCPT_QSIZE_MIN_LG2, 1 << IOCPT_QSIZE_MAX_LG2);
		return -ERANGE;
	}

	IOCPT_PRINT(DEBUG, "queue_id %u", queue_id);

	err = iocpt_cryptoq_alloc(dev, socket_id, queue_id,
				qp_conf->nb_descriptors);
	if (err != 0)
		return err;

	cdev->data->queue_pairs[queue_id] = dev->cryptoqs[queue_id];

	return 0;
}

static unsigned int
iocpt_op_get_session_size(struct rte_cryptodev *cdev __rte_unused)
{
	return iocpt_session_size();
}

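/* Only a single (unchained) AEAD transform is supported */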
static inline int
iocpt_is_algo_supported(struct rte_crypto_sym_xform *xform)
{
	if (xform->next != NULL) {
		IOCPT_PRINT(ERR, "chaining not supported");
		return -ENOTSUP;
	}

	if (xform->type != RTE_CRYPTO_SYM_XFORM_AEAD) {
		IOCPT_PRINT(ERR, "xform->type %d not supported", xform->type);
		return -ENOTSUP;
	}

	return 0;
}

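/* Translate an AES-GCM AEAD transform into the private session state */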
static __rte_always_inline int
iocpt_fill_sess_aead(struct rte_crypto_sym_xform *xform,
		struct iocpt_session_priv *priv)
{
	struct rte_crypto_aead_xform *aead_form = &xform->aead;

	if (aead_form->algo != RTE_CRYPTO_AEAD_AES_GCM) {
		IOCPT_PRINT(ERR, "Unknown algo");
		return -EINVAL;
	}
	if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
		priv->op = IOCPT_DESC_OPCODE_GCM_AEAD_ENCRYPT;
	} else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
		priv->op = IOCPT_DESC_OPCODE_GCM_AEAD_DECRYPT;
	} else {
		IOCPT_PRINT(ERR, "Unknown cipher operation");
		return -1;
	}

	if (aead_form->key.length < IOCPT_SESS_KEY_LEN_MIN ||
	    aead_form->key.length > IOCPT_SESS_KEY_LEN_MAX_SYMM) {
		IOCPT_PRINT(ERR, "Invalid cipher keylen %u",
			aead_form->key.length);
		return -1;
	}
	priv->key_len = aead_form->key.length;
	memcpy(priv->key, aead_form->key.data, priv->key_len);

	priv->type = IOCPT_SESS_AEAD_AES_GCM;
	priv->iv_offset = aead_form->iv.offset;
	priv->iv_length = aead_form->iv.length;
	priv->digest_length = aead_form->digest_length;
	priv->aad_length = aead_form->aad_length;

	return 0;
}

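/* Check the transform, fill the private session state, then initialize the session */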
static int
iocpt_session_cfg(struct iocpt_dev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess)
{
	struct rte_crypto_sym_xform *chain;
	struct iocpt_session_priv *priv = NULL;

	if (iocpt_is_algo_supported(xform) < 0)
		return -ENOTSUP;

	if (unlikely(sess == NULL)) {
		IOCPT_PRINT(ERR, "invalid session");
		return -EINVAL;
	}

	priv = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
	priv->dev = dev;

	chain = xform;
	while (chain) {
		switch (chain->type) {
		case RTE_CRYPTO_SYM_XFORM_AEAD:
			if (iocpt_fill_sess_aead(chain, priv))
				return -EIO;
			break;
		default:
			IOCPT_PRINT(ERR, "invalid crypto xform type %d",
				chain->type);
			return -ENOTSUP;
		}
		chain = chain->next;
	}

	return iocpt_session_init(priv);
}

static int
iocpt_op_session_cfg(struct rte_cryptodev *cdev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess)
{
	struct iocpt_dev *dev = cdev->data->dev_private;

	return iocpt_session_cfg(dev, xform, sess);
}

static void
iocpt_session_clear(struct rte_cryptodev_sym_session *sess)
{
	iocpt_session_deinit(CRYPTODEV_GET_SYM_SESS_PRIV(sess));
}

static void
iocpt_op_session_clear(struct rte_cryptodev *cdev __rte_unused,
		struct rte_cryptodev_sym_session *sess)
{
	iocpt_session_clear(sess);
}

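/* Write one scatter-gather element in little-endian device format */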
static inline void
iocpt_fill_sge(struct iocpt_crypto_sg_elem *arr, uint8_t idx,
		uint64_t addr, uint16_t len)
{
	arr[idx].addr = rte_cpu_to_le_64(addr);
	arr[idx].len = rte_cpu_to_le_16(len);
}

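/*
 * Post a single AEAD request: build the source SGL (IV, optional AAD,
 * payload segments, digest), the optional out-of-place destination SGL,
 * and the descriptor, then advance the producer index.
 */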
static __rte_always_inline int
iocpt_enq_one_aead(struct iocpt_crypto_q *cptq,
		struct iocpt_session_priv *priv, struct rte_crypto_op *op)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct iocpt_queue *q = &cptq->q;
	struct iocpt_crypto_desc *desc, *desc_base = q->base;
	struct iocpt_crypto_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
	struct iocpt_crypto_sg_elem *src, *dst;
	rte_iova_t aad_addr, digest_addr, iv_addr, seg_addr;
	uint32_t data_len, data_offset, seg_len;
	uint8_t nsge_src = 0, nsge_dst = 0, flags = 0;
	struct rte_mbuf *m;

	desc = &desc_base[q->head_idx];
	sg_desc = &sg_desc_base[q->head_idx];
	src = sg_desc->src_elems;
	dst = sg_desc->dst_elems;

	/* Fill the first SGE with the IV / Nonce */
	iv_addr = rte_crypto_op_ctophys_offset(op, priv->iv_offset);
	iocpt_fill_sge(src, nsge_src++, iv_addr, priv->iv_length);

	/* Fill the second SGE with the AAD, if applicable */
	if (priv->aad_length > 0) {
		aad_addr = sym_op->aead.aad.phys_addr;
		iocpt_fill_sge(src, nsge_src++, aad_addr, priv->aad_length);
		flags |= IOCPT_DESC_F_AAD_VALID;
	}

	m = sym_op->m_src;
	data_len = sym_op->aead.data.length;

	/* Fast-forward through mbuf chain to account for data offset */
	data_offset = sym_op->aead.data.offset;
	while (m != NULL && data_offset >= m->data_len) {
		data_offset -= m->data_len;
		m = m->next;
	}

	/* Fill the next SGEs with the payload segments */
	while (m != NULL && data_len > 0) {
		seg_addr = rte_mbuf_data_iova(m) + data_offset;
		seg_len = RTE_MIN(m->data_len - data_offset, data_len);
		data_offset = 0;
		data_len -= seg_len;

		/* Use -1 to save room for digest */
		if (nsge_src >= IOCPT_CRYPTO_MAX_SG_ELEMS - 1)
			return -ERANGE;

		iocpt_fill_sge(src, nsge_src++, seg_addr, seg_len);

		m = m->next;
	}

	/* AEAD AES-GCM: digest == authentication tag */
	digest_addr = sym_op->aead.digest.phys_addr;
	iocpt_fill_sge(src, nsge_src++, digest_addr, priv->digest_length);

	/* Process Out-Of-Place destination SGL */
	if (sym_op->m_dst != NULL) {
		/* Put the AAD here, too */
		if (priv->aad_length > 0)
			iocpt_fill_sge(dst, nsge_dst++,
				sym_op->aead.aad.phys_addr, priv->aad_length);

		m = sym_op->m_dst;
		data_len = sym_op->aead.data.length;

		/* Fast-forward through chain to account for data offset */
		data_offset = sym_op->aead.data.offset;
		while (m != NULL && data_offset >= m->data_len) {
			data_offset -= m->data_len;
			m = m->next;
		}

		/* Fill in the SGEs with the payload segments */
		while (m != NULL && data_len > 0) {
			seg_addr = rte_mbuf_data_iova(m) + data_offset;
			seg_len = RTE_MIN(m->data_len - data_offset, data_len);
			data_offset = 0;
			data_len -= seg_len;

			if (nsge_dst >= IOCPT_CRYPTO_MAX_SG_ELEMS)
				return -ERANGE;

			iocpt_fill_sge(dst, nsge_dst++, seg_addr, seg_len);

			m = m->next;
		}
	}

	desc->opcode = priv->op;
	desc->flags = flags;
	desc->num_src_dst_sgs = iocpt_encode_nsge_src_dst(nsge_src, nsge_dst);
	desc->session_tag = rte_cpu_to_le_32(priv->index);

	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	q->info[q->head_idx] = op;
	q->head_idx = Q_NEXT_TO_POST(q, 1);

	return 0;
}

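/* Enqueue a burst of symmetric ops, stopping at the first one that cannot be posted */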
static uint16_t
iocpt_enqueue_sym(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct iocpt_crypto_q *cptq = qp;
	struct rte_crypto_op *op;
	struct iocpt_session_priv *priv;
	uint16_t avail, count;
	int err;

	avail = iocpt_q_space_avail(&cptq->q);
	if (unlikely(nb_ops > avail))
		nb_ops = avail;

	count = 0;
	while (likely(count < nb_ops)) {
		op = ops[count];

		if (unlikely(op->sess_type != RTE_CRYPTO_OP_WITH_SESSION)) {
			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			break;
		}

		priv = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);
		if (unlikely(priv == NULL)) {
			op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
			break;
		}

		err = iocpt_enq_one_aead(cptq, priv, op);
		if (unlikely(err != 0)) {
			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			break;
		}

		count++;
	}

	if (likely(count > 0)) {
		iocpt_q_flush(&cptq->q);

		/* Restart timer if ops are being enqueued */
		cptq->last_wdog_cycles = rte_get_timer_cycles();
	}

	return count;
}

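/*
 * Post a self-contained dummy encrypt op on the reserved watchdog session,
 * used to nudge the queue when it appears to have stalled.
 */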
static void
iocpt_enqueue_wdog(struct iocpt_crypto_q *cptq)
{
	struct iocpt_queue *q = &cptq->q;
	struct iocpt_crypto_desc *desc, *desc_base = q->base;
	struct iocpt_crypto_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
	struct iocpt_crypto_sg_elem *src;
	struct rte_crypto_op *wdog_op;
	rte_iova_t iv_addr, pld_addr, tag_addr;
	uint8_t nsge_src = 0;
	uint16_t avail;

	avail = iocpt_q_space_avail(&cptq->q);
	if (avail < 1)
		goto out_flush;

	wdog_op = rte_zmalloc_socket("iocpt", sizeof(*wdog_op),
				RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (wdog_op == NULL)
		goto out_flush;

	wdog_op->type = IOCPT_Q_WDOG_OP_TYPE;
	wdog_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

	desc = &desc_base[q->head_idx];
	sg_desc = &sg_desc_base[q->head_idx];
	src = sg_desc->src_elems;

	/* Fill the first SGE with the IV / Nonce */
	iv_addr = rte_mem_virt2iova(cptq->wdog_iv);
	iocpt_fill_sge(src, nsge_src++, iv_addr, IOCPT_Q_WDOG_IV_LEN);

	/* Fill the second SGE with the payload segment */
	pld_addr = rte_mem_virt2iova(cptq->wdog_pld);
	iocpt_fill_sge(src, nsge_src++, pld_addr, IOCPT_Q_WDOG_PLD_LEN);

	/* AEAD AES-GCM: digest == authentication tag */
	tag_addr = rte_mem_virt2iova(cptq->wdog_tag);
	iocpt_fill_sge(src, nsge_src++, tag_addr, IOCPT_Q_WDOG_TAG_LEN);

	desc->opcode = IOCPT_DESC_OPCODE_GCM_AEAD_ENCRYPT;
	desc->flags = 0;
	desc->num_src_dst_sgs = iocpt_encode_nsge_src_dst(nsge_src, 0);
	desc->session_tag = rte_cpu_to_le_32(IOCPT_Q_WDOG_SESS_IDX);

	q->info[q->head_idx] = wdog_op;
	q->head_idx = Q_NEXT_TO_POST(q, 1);

	IOCPT_PRINT(DEBUG, "Queue %u wdog enq %p",
		q->index, wdog_op);
	cptq->enqueued_wdogs++;

out_flush:
	iocpt_q_flush(q);
}

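/*
 * Dequeue completed ops: first record statuses from the completion queue
 * (completions may arrive out of order), then return ops from the send
 * queue in submission order. Kicks the watchdog if the queue looks stuck.
 */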
static uint16_t
iocpt_dequeue_sym(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct iocpt_crypto_q *cptq = qp;
	struct iocpt_queue *q = &cptq->q;
	struct iocpt_cq *cq = &cptq->cq;
	struct rte_crypto_op *op;
	struct iocpt_crypto_comp *cq_desc_base = cq->base;
	volatile struct iocpt_crypto_comp *cq_desc;
	uint64_t then, now, hz, delta;
	uint16_t count = 0;

	cq_desc = &cq_desc_base[cq->tail_idx];

	/* First walk the CQ to update any completed op's status
	 * NB: These can arrive out of order!
	 */
	while ((cq_desc->color & 0x1) == cq->done_color) {
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
		if (unlikely(cq->tail_idx == 0))
			cq->done_color = !cq->done_color;

		op = q->info[rte_le_to_cpu_16(cq_desc->comp_index)];

		/* Process returned CQ descriptor status */
		if (unlikely(cq_desc->status)) {
			switch (cq_desc->status) {
			case IOCPT_COMP_SYMM_AUTH_VERIFY_ERROR:
				op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
				break;
			case IOCPT_COMP_INVAL_OPCODE_ERROR:
			case IOCPT_COMP_UNSUPP_OPCODE_ERROR:
			case IOCPT_COMP_SYMM_SRC_SG_ERROR:
			case IOCPT_COMP_SYMM_DST_SG_ERROR:
			case IOCPT_COMP_SYMM_SRC_DST_LEN_MISMATCH:
			case IOCPT_COMP_SYMM_KEY_IDX_ERROR:
				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
				break;
			default:
				op->status = RTE_CRYPTO_OP_STATUS_ERROR;
				break;
			}
		} else
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

		cq_desc = &cq_desc_base[cq->tail_idx];
	}

	/* Next walk the SQ to pop off completed ops in-order */
	while (count < nb_ops) {
		op = q->info[q->tail_idx];

		/* No more completions */
		if (op == NULL ||
		    op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
			break;

		/* Handle watchdog operations */
		if (unlikely(op->type == IOCPT_Q_WDOG_OP_TYPE)) {
			IOCPT_PRINT(DEBUG, "Queue %u wdog deq %p st %d",
				q->index, op, op->status);
			q->info[q->tail_idx] = NULL;
			q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
			cptq->dequeued_wdogs++;
			rte_free(op);
			continue;
		}

		ops[count] = op;
		q->info[q->tail_idx] = NULL;

		q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
		count++;
	}

	if (!count) {
		/*
		 * Ring the doorbell again if no work was dequeued and work
		 * is still pending after the deadline.
		 */
		if (q->head_idx != q->tail_idx) {
			then = cptq->last_wdog_cycles;
			now = rte_get_timer_cycles();
			hz = rte_get_timer_hz();
			delta = (now - then) * 1000;

			if (delta >= hz * IONIC_Q_WDOG_MS) {
				iocpt_enqueue_wdog(cptq);
				cptq->last_wdog_cycles = now;
			}
		}
	} else
		/* Restart timer if the queue is making progress */
		cptq->last_wdog_cycles = rte_get_timer_cycles();

	return count;
}

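/* Cryptodev operation callbacks */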
static struct rte_cryptodev_ops iocpt_ops = {
	.dev_configure = iocpt_op_config,
	.dev_start = iocpt_op_start,
	.dev_stop = iocpt_op_stop,
	.dev_close = iocpt_op_close,
	.dev_infos_get = iocpt_op_info_get,

	.queue_pair_setup = iocpt_op_queue_setup,
	.queue_pair_release = iocpt_op_queue_release,

	.sym_session_get_size = iocpt_op_get_session_size,
	.sym_session_configure = iocpt_op_session_cfg,
	.sym_session_clear = iocpt_op_session_clear,
};

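/* Attach the op table and burst handlers to the cryptodev instance */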
int
iocpt_assign_ops(struct rte_cryptodev *cdev)
{
	struct iocpt_dev *dev = cdev->data->dev_private;

	cdev->dev_ops = &iocpt_ops;
	cdev->feature_flags = dev->features;

	if (dev->features & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO) {
		cdev->enqueue_burst = iocpt_enqueue_sym;
		cdev->dequeue_burst = iocpt_dequeue_sym;
	}

	return 0;
}