/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021-2024 Advanced Micro Devices, Inc.
 */

#include <assert.h>
#include <string.h>

#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_mempool.h>

#include "ionic_crypto.h"

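/* Configure the device. The generic cryptodev config is unused here;
 * all setup is delegated to iocpt_configure().
 */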
static int
iocpt_op_config(struct rte_cryptodev *cdev,
		struct rte_cryptodev_config *config __rte_unused)
{
	struct iocpt_dev *dev = cdev->data->dev_private;

	iocpt_configure(dev);

	return 0;
}

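/* Start the device so its queue pairs can begin processing ops. */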
static int
iocpt_op_start(struct rte_cryptodev *cdev)
{
	struct iocpt_dev *dev = cdev->data->dev_private;

	return iocpt_start(dev);
}

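/* Stop the device; the inverse of iocpt_op_start(). */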
static void
iocpt_op_stop(struct rte_cryptodev *cdev)
{
	struct iocpt_dev *dev = cdev->data->dev_private;

	iocpt_stop(dev);
}

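/* Close the device and release its resources via iocpt_deinit(). */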
static int
iocpt_op_close(struct rte_cryptodev *cdev)
{
	struct iocpt_dev *dev = cdev->data->dev_private;

	iocpt_deinit(dev);

	return 0;
}

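/* Report device limits, feature flags, and capabilities to the app. */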
static void
iocpt_op_info_get(struct rte_cryptodev *cdev, struct rte_cryptodev_info *info)
{
	struct iocpt_dev *dev = cdev->data->dev_private;

	if (info == NULL)
		return;

	info->max_nb_queue_pairs = dev->max_qps;
	info->feature_flags = dev->features;
	info->capabilities = iocpt_get_caps(info->feature_flags);
	info->sym.max_nb_sessions = dev->max_sessions;
	info->driver_id = dev->driver_id;
	info->min_mbuf_headroom_req = 0;
	info->min_mbuf_tailroom_req = 0;
}

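/* Release a queue pair. The queue is expected to be deinited already;
 * the assert below checks this in debug builds.
 */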
static int
iocpt_op_queue_release(struct rte_cryptodev *cdev, uint16_t queue_id)
{
	struct iocpt_crypto_q *cptq = cdev->data->queue_pairs[queue_id];

	IOCPT_PRINT(DEBUG, "queue_id %u", queue_id);

	assert(!(cptq->flags & IOCPT_Q_F_INITED));

	iocpt_cryptoq_free(cptq);

	cdev->data->queue_pairs[queue_id] = NULL;

	return 0;
}

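/* Set up a queue pair, first releasing any existing queue with this id.
 * The descriptor count must lie within the device's power-of-two bounds.
 */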
static int
iocpt_op_queue_setup(struct rte_cryptodev *cdev, uint16_t queue_id,
		const struct rte_cryptodev_qp_conf *qp_conf,
		int socket_id)
{
	struct iocpt_dev *dev = cdev->data->dev_private;
	int err;

	if (cdev->data->queue_pairs[queue_id] != NULL)
		iocpt_op_queue_release(cdev, queue_id);

	if (qp_conf->nb_descriptors < (1 << IOCPT_QSIZE_MIN_LG2) ||
	    qp_conf->nb_descriptors > (1 << IOCPT_QSIZE_MAX_LG2)) {
		IOCPT_PRINT(ERR, "invalid nb_descriptors %u, use range %u..%u",
			qp_conf->nb_descriptors,
			1 << IOCPT_QSIZE_MIN_LG2, 1 << IOCPT_QSIZE_MAX_LG2);
		return -ERANGE;
	}

	IOCPT_PRINT(DEBUG, "queue_id %u", queue_id);

	err = iocpt_cryptoq_alloc(dev, socket_id, queue_id,
				qp_conf->nb_descriptors);
	if (err != 0)
		return err;

	cdev->data->queue_pairs[queue_id] = dev->cryptoqs[queue_id];

	return 0;
}

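/* Size of the PMD's per-session private data. */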
static unsigned int
iocpt_op_get_session_size(struct rte_cryptodev *cdev __rte_unused)
{
	return iocpt_session_size();
}

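/* Only a single (unchained) AEAD transform is supported. */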
static inline int
iocpt_is_algo_supported(struct rte_crypto_sym_xform *xform)
{
	if (xform->next != NULL) {
		IOCPT_PRINT(ERR, "chaining not supported");
		return -ENOTSUP;
	}

	if (xform->type != RTE_CRYPTO_SYM_XFORM_AEAD) {
		IOCPT_PRINT(ERR, "xform->type %d not supported", xform->type);
		return -ENOTSUP;
	}

	return 0;
}

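/* Translate an AES-GCM AEAD transform into session private state:
 * opcode, key, IV, digest, and AAD parameters.
 */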
static __rte_always_inline int
iocpt_fill_sess_aead(struct rte_crypto_sym_xform *xform,
		struct iocpt_session_priv *priv)
{
	struct rte_crypto_aead_xform *aead_form = &xform->aead;

	if (aead_form->algo != RTE_CRYPTO_AEAD_AES_GCM) {
		IOCPT_PRINT(ERR, "Unsupported AEAD algorithm");
		return -EINVAL;
	}
	if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
		priv->op = IOCPT_DESC_OPCODE_GCM_AEAD_ENCRYPT;
	} else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
		priv->op = IOCPT_DESC_OPCODE_GCM_AEAD_DECRYPT;
	} else {
		IOCPT_PRINT(ERR, "Unknown cipher operation");
		return -EINVAL;
	}

	if (aead_form->key.length < IOCPT_SESS_KEY_LEN_MIN ||
	    aead_form->key.length > IOCPT_SESS_KEY_LEN_MAX_SYMM) {
		IOCPT_PRINT(ERR, "Invalid cipher keylen %u",
			aead_form->key.length);
		return -EINVAL;
	}
	priv->key_len = aead_form->key.length;
	memcpy(priv->key, aead_form->key.data, priv->key_len);

	priv->type = IOCPT_SESS_AEAD_AES_GCM;
	priv->iv_offset = aead_form->iv.offset;
	priv->iv_length = aead_form->iv.length;
	priv->digest_length = aead_form->digest_length;
	priv->aad_length = aead_form->aad_length;

	return 0;
}

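/* Fill in the session private data from the transform chain and
 * complete the setup via iocpt_session_init().
 */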
static int
iocpt_session_cfg(struct iocpt_dev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess)
{
	struct rte_crypto_sym_xform *chain;
	struct iocpt_session_priv *priv = NULL;

	if (iocpt_is_algo_supported(xform) < 0)
		return -ENOTSUP;

	if (unlikely(sess == NULL)) {
		IOCPT_PRINT(ERR, "invalid session");
		return -EINVAL;
	}

	priv = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
	priv->dev = dev;

	chain = xform;
	while (chain != NULL) {
		switch (chain->type) {
		case RTE_CRYPTO_SYM_XFORM_AEAD:
			if (iocpt_fill_sess_aead(chain, priv))
				return -EIO;
			break;
		default:
			IOCPT_PRINT(ERR, "invalid crypto xform type %d",
				chain->type);
			return -ENOTSUP;
		}
		chain = chain->next;
	}

	return iocpt_session_init(priv);
}

static int
iocpt_op_session_cfg(struct rte_cryptodev *cdev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess)
{
	struct iocpt_dev *dev = cdev->data->dev_private;

	return iocpt_session_cfg(dev, xform, sess);
}

static void
iocpt_session_clear(struct rte_cryptodev_sym_session *sess)
{
	iocpt_session_deinit(CRYPTODEV_GET_SYM_SESS_PRIV(sess));
}

static void
iocpt_op_session_clear(struct rte_cryptodev *cdev __rte_unused,
		struct rte_cryptodev_sym_session *sess)
{
	iocpt_session_clear(sess);
}

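/* Write one scatter-gather element in little-endian device format. */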
static inline void
iocpt_fill_sge(struct iocpt_crypto_sg_elem *arr, uint8_t idx,
		uint64_t addr, uint16_t len)
{
	arr[idx].addr = rte_cpu_to_le_64(addr);
	arr[idx].len = rte_cpu_to_le_16(len);
}

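/* Post one AEAD op. The source SGL is laid out as IV/nonce, optional
 * AAD, the payload segments, and finally the digest (auth tag).
 * A destination SGL is built only for out-of-place ops (m_dst set).
 */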
static __rte_always_inline int
iocpt_enq_one_aead(struct iocpt_crypto_q *cptq,
		struct iocpt_session_priv *priv, struct rte_crypto_op *op)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct iocpt_queue *q = &cptq->q;
	struct iocpt_crypto_desc *desc, *desc_base = q->base;
	struct iocpt_crypto_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
	struct iocpt_crypto_sg_elem *src, *dst;
	rte_iova_t aad_addr, digest_addr, iv_addr, seg_addr;
	uint32_t data_len, data_offset, seg_len;
	uint8_t nsge_src = 0, nsge_dst = 0, flags = 0;
	struct rte_mbuf *m;

	desc = &desc_base[q->head_idx];
	sg_desc = &sg_desc_base[q->head_idx];
	src = sg_desc->src_elems;
	dst = sg_desc->dst_elems;

	/* Fill the first SGE with the IV / Nonce */
	iv_addr = rte_crypto_op_ctophys_offset(op, priv->iv_offset);
	iocpt_fill_sge(src, nsge_src++, iv_addr, priv->iv_length);

	/* Fill the second SGE with the AAD, if applicable */
	if (priv->aad_length > 0) {
		aad_addr = sym_op->aead.aad.phys_addr;
		iocpt_fill_sge(src, nsge_src++, aad_addr, priv->aad_length);
		flags |= IOCPT_DESC_F_AAD_VALID;
	}

	m = sym_op->m_src;
	data_len = sym_op->aead.data.length;

	/* Fast-forward through mbuf chain to account for data offset */
	data_offset = sym_op->aead.data.offset;
	while (m != NULL && data_offset >= m->data_len) {
		data_offset -= m->data_len;
		m = m->next;
	}

	/* Fill the next SGEs with the payload segments */
	while (m != NULL && data_len > 0) {
		seg_addr = rte_mbuf_data_iova(m) + data_offset;
		seg_len = RTE_MIN(m->data_len - data_offset, data_len);
		data_offset = 0;
		data_len -= seg_len;

		/* Use -1 to save room for digest */
		if (nsge_src >= IOCPT_CRYPTO_MAX_SG_ELEMS - 1)
			return -ERANGE;

		iocpt_fill_sge(src, nsge_src++, seg_addr, seg_len);

		m = m->next;
	}

	/* AEAD AES-GCM: digest == authentication tag */
	digest_addr = sym_op->aead.digest.phys_addr;
	iocpt_fill_sge(src, nsge_src++, digest_addr, priv->digest_length);

	/* Process Out-Of-Place destination SGL */
	if (sym_op->m_dst != NULL) {
		/* Put the AAD here, too */
		if (priv->aad_length > 0)
			iocpt_fill_sge(dst, nsge_dst++,
				sym_op->aead.aad.phys_addr, priv->aad_length);

		m = sym_op->m_dst;
		data_len = sym_op->aead.data.length;

		/* Fast-forward through chain to account for data offset */
		data_offset = sym_op->aead.data.offset;
		while (m != NULL && data_offset >= m->data_len) {
			data_offset -= m->data_len;
			m = m->next;
		}

		/* Fill in the SGEs with the payload segments */
		while (m != NULL && data_len > 0) {
			seg_addr = rte_mbuf_data_iova(m) + data_offset;
			seg_len = RTE_MIN(m->data_len - data_offset, data_len);
			data_offset = 0;
			data_len -= seg_len;

			if (nsge_dst >= IOCPT_CRYPTO_MAX_SG_ELEMS)
				return -ERANGE;

			iocpt_fill_sge(dst, nsge_dst++, seg_addr, seg_len);

			m = m->next;
		}
	}

	desc->opcode = priv->op;
	desc->flags = flags;
	desc->num_src_dst_sgs = iocpt_encode_nsge_src_dst(nsge_src, nsge_dst);
	desc->session_tag = rte_cpu_to_le_32(priv->index);

	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	q->info[q->head_idx] = op;
	q->head_idx = Q_NEXT_TO_POST(q, 1);

	return 0;
}

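/* Burst enqueue. Stops at the first invalid op or when the ring fills,
 * then flushes the queue once for the whole batch.
 */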
static uint16_t
iocpt_enqueue_sym(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct iocpt_crypto_q *cptq = qp;
	struct rte_crypto_op *op;
	struct iocpt_session_priv *priv;
	uint16_t avail, count;
	int err;

	avail = iocpt_q_space_avail(&cptq->q);
	if (unlikely(nb_ops > avail))
		nb_ops = avail;

	count = 0;
	while (likely(count < nb_ops)) {
		op = ops[count];

		if (unlikely(op->sess_type != RTE_CRYPTO_OP_WITH_SESSION)) {
			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			break;
		}

		priv = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);
		if (unlikely(priv == NULL)) {
			op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
			break;
		}

		err = iocpt_enq_one_aead(cptq, priv, op);
		if (unlikely(err != 0)) {
			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			break;
		}

		count++;
	}

	if (likely(count > 0))
		iocpt_q_flush(&cptq->q);

	return count;
}

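/* Burst dequeue in two passes: first record completion status from the
 * (possibly out-of-order) CQ, then return finished ops from the SQ in
 * submission order.
 */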
static uint16_t
iocpt_dequeue_sym(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct iocpt_crypto_q *cptq = qp;
	struct iocpt_queue *q = &cptq->q;
	struct iocpt_cq *cq = &cptq->cq;
	struct rte_crypto_op *op;
	struct iocpt_crypto_comp *cq_desc_base = cq->base;
	volatile struct iocpt_crypto_comp *cq_desc;
	uint16_t count = 0;

	cq_desc = &cq_desc_base[cq->tail_idx];

	/* First walk the CQ to update any completed op's status.
	 * NB: These can arrive out of order!
	 */
	while ((cq_desc->color & 0x1) == cq->done_color) {
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
		if (unlikely(cq->tail_idx == 0))
			cq->done_color = !cq->done_color;

		op = q->info[rte_le_to_cpu_16(cq_desc->comp_index)];

		/* Process returned CQ descriptor status */
		if (unlikely(cq_desc->status)) {
			switch (cq_desc->status) {
			case IOCPT_COMP_SYMM_AUTH_VERIFY_ERROR:
				op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
				break;
			case IOCPT_COMP_INVAL_OPCODE_ERROR:
			case IOCPT_COMP_UNSUPP_OPCODE_ERROR:
			case IOCPT_COMP_SYMM_SRC_SG_ERROR:
			case IOCPT_COMP_SYMM_DST_SG_ERROR:
			case IOCPT_COMP_SYMM_SRC_DST_LEN_MISMATCH:
			case IOCPT_COMP_SYMM_KEY_IDX_ERROR:
				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
				break;
			default:
				op->status = RTE_CRYPTO_OP_STATUS_ERROR;
				break;
			}
		} else {
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}

		cq_desc = &cq_desc_base[cq->tail_idx];
	}

	/* Next walk the SQ to pop off completed ops in order */
	while (count < nb_ops) {
		op = q->info[q->tail_idx];

		/* No more completions */
		if (op == NULL ||
		    op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
			break;

		ops[count] = op;
		q->info[q->tail_idx] = NULL;

		q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
		count++;
	}

	return count;
}

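/* Control-path ops table registered with the cryptodev framework. */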
static struct rte_cryptodev_ops iocpt_ops = {
	.dev_configure = iocpt_op_config,
	.dev_start = iocpt_op_start,
	.dev_stop = iocpt_op_stop,
	.dev_close = iocpt_op_close,
	.dev_infos_get = iocpt_op_info_get,

	.queue_pair_setup = iocpt_op_queue_setup,
	.queue_pair_release = iocpt_op_queue_release,

	.sym_session_get_size = iocpt_op_get_session_size,
	.sym_session_configure = iocpt_op_session_cfg,
	.sym_session_clear = iocpt_op_session_clear,
};

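/* Attach the ops table and, when symmetric crypto is supported,
 * the data-path burst handlers.
 */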
int
iocpt_assign_ops(struct rte_cryptodev *cdev)
{
	struct iocpt_dev *dev = cdev->data->dev_private;

	cdev->dev_ops = &iocpt_ops;
	cdev->feature_flags = dev->features;

	if (dev->features & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO) {
		cdev->enqueue_burst = iocpt_enqueue_sym;
		cdev->dequeue_burst = iocpt_dequeue_sym;
	}

	return 0;
}