xref: /dpdk/drivers/crypto/octeontx/otx_cryptodev_ops.c (revision 9c4491cf5c89a015cf358011151a92b9cba78560)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Cavium, Inc
3  */
4 
5 #include <rte_alarm.h>
6 #include <rte_bus_pci.h>
7 #include <rte_cryptodev.h>
8 #include <rte_cryptodev_pmd.h>
9 #include <rte_malloc.h>
10 
11 #include "cpt_pmd_logs.h"
12 #include "cpt_pmd_ops_helper.h"
13 #include "cpt_ucode.h"
14 #include "cpt_request_mgr.h"
15 
16 #include "otx_cryptodev.h"
17 #include "otx_cryptodev_capabilities.h"
18 #include "otx_cryptodev_hw_access.h"
19 #include "otx_cryptodev_ops.h"
20 
/* Number of OCTEON TX crypto devices probed so far; guarded by
 * otx_probe_count_lock. Global fast-path resources below are created on
 * the first probe and freed when the count drops back to zero.
 */
static int otx_cryptodev_probe_count;
static rte_spinlock_t otx_probe_count_lock = RTE_SPINLOCK_INITIALIZER;

/* Shared metabuf pool and metadata lengths used by the datapath;
 * initialized once in init_global_resources().
 */
static struct rte_mempool *otx_cpt_meta_pool;
static int otx_cpt_op_mlen;	/* meta length for scatter-gather mode */
static int otx_cpt_op_sb_mlen;	/* meta length for direct (single-buffer) mode */

/* Forward declarations */

static int
otx_cpt_que_pair_release(struct rte_cryptodev *dev, uint16_t que_pair_id);
32 
33 /*
34  * Initializes global variables used by fast-path code
35  *
36  * @return
37  *   - 0 on success, errcode on error
38  */
39 static int
40 init_global_resources(void)
41 {
42 	/* Get meta len for scatter gather mode */
43 	otx_cpt_op_mlen = cpt_pmd_ops_helper_get_mlen_sg_mode();
44 
45 	/* Extra 4B saved for future considerations */
46 	otx_cpt_op_mlen += 4 * sizeof(uint64_t);
47 
48 	otx_cpt_meta_pool = rte_mempool_create("cpt_metabuf-pool", 4096 * 16,
49 					       otx_cpt_op_mlen, 512, 0,
50 					       NULL, NULL, NULL, NULL,
51 					       SOCKET_ID_ANY, 0);
52 	if (!otx_cpt_meta_pool) {
53 		CPT_LOG_ERR("cpt metabuf pool not created");
54 		return -ENOMEM;
55 	}
56 
57 	/* Get meta len for direct mode */
58 	otx_cpt_op_sb_mlen = cpt_pmd_ops_helper_get_mlen_direct_mode();
59 
60 	/* Extra 4B saved for future considerations */
61 	otx_cpt_op_sb_mlen += 4 * sizeof(uint64_t);
62 
63 	return 0;
64 }
65 
66 void
67 cleanup_global_resources(void)
68 {
69 	/* Take lock */
70 	rte_spinlock_lock(&otx_probe_count_lock);
71 
72 	/* Decrement the cryptodev count */
73 	otx_cryptodev_probe_count--;
74 
75 	/* Free buffers */
76 	if (otx_cpt_meta_pool && otx_cryptodev_probe_count == 0)
77 		rte_mempool_free(otx_cpt_meta_pool);
78 
79 	/* Free lock */
80 	rte_spinlock_unlock(&otx_probe_count_lock);
81 }
82 
83 /* Alarm routines */
84 
85 static void
86 otx_cpt_alarm_cb(void *arg)
87 {
88 	struct cpt_vf *cptvf = arg;
89 	otx_cpt_poll_misc(cptvf);
90 	rte_eal_alarm_set(CPT_INTR_POLL_INTERVAL_MS * 1000,
91 			  otx_cpt_alarm_cb, cptvf);
92 }
93 
94 static int
95 otx_cpt_periodic_alarm_start(void *arg)
96 {
97 	return rte_eal_alarm_set(CPT_INTR_POLL_INTERVAL_MS * 1000,
98 				 otx_cpt_alarm_cb, arg);
99 }
100 
101 static int
102 otx_cpt_periodic_alarm_stop(void *arg)
103 {
104 	return rte_eal_alarm_cancel(otx_cpt_alarm_cb, arg);
105 }
106 
107 /* PMD ops */
108 
/* Device configure callback: this PMD needs no device-level
 * configuration, so any config is accepted.
 */
static int
otx_cpt_dev_config(struct rte_cryptodev *dev __rte_unused,
		   struct rte_cryptodev_config *config __rte_unused)
{
	CPT_PMD_INIT_FUNC_TRACE();
	return 0;
}
116 
117 static int
118 otx_cpt_dev_start(struct rte_cryptodev *c_dev)
119 {
120 	void *cptvf = c_dev->data->dev_private;
121 
122 	CPT_PMD_INIT_FUNC_TRACE();
123 
124 	return otx_cpt_start_device(cptvf);
125 }
126 
127 static void
128 otx_cpt_dev_stop(struct rte_cryptodev *c_dev)
129 {
130 	void *cptvf = c_dev->data->dev_private;
131 
132 	CPT_PMD_INIT_FUNC_TRACE();
133 
134 	otx_cpt_stop_device(cptvf);
135 }
136 
137 static int
138 otx_cpt_dev_close(struct rte_cryptodev *c_dev)
139 {
140 	void *cptvf = c_dev->data->dev_private;
141 	int i, ret;
142 
143 	CPT_PMD_INIT_FUNC_TRACE();
144 
145 	for (i = 0; i < c_dev->data->nb_queue_pairs; i++) {
146 		ret = otx_cpt_que_pair_release(c_dev, i);
147 		if (ret)
148 			return ret;
149 	}
150 
151 	otx_cpt_periodic_alarm_stop(cptvf);
152 	otx_cpt_deinit_device(cptvf);
153 
154 	return 0;
155 }
156 
157 static void
158 otx_cpt_dev_info_get(struct rte_cryptodev *dev, struct rte_cryptodev_info *info)
159 {
160 	CPT_PMD_INIT_FUNC_TRACE();
161 	if (info != NULL) {
162 		info->max_nb_queue_pairs = CPT_NUM_QS_PER_VF;
163 		info->feature_flags = dev->feature_flags;
164 		info->capabilities = otx_get_capabilities();
165 		info->sym.max_nb_sessions = 0;
166 		info->driver_id = otx_cryptodev_driver_id;
167 		info->min_mbuf_headroom_req = OTX_CPT_MIN_HEADROOM_REQ;
168 		info->min_mbuf_tailroom_req = OTX_CPT_MIN_TAILROOM_REQ;
169 	}
170 }
171 
/* Statistics retrieval is not implemented by this PMD; stub only. */
static void
otx_cpt_stats_get(struct rte_cryptodev *dev __rte_unused,
		  struct rte_cryptodev_stats *stats __rte_unused)
{
	CPT_PMD_INIT_FUNC_TRACE();
}
178 
/* Statistics reset is not implemented by this PMD; stub only. */
static void
otx_cpt_stats_reset(struct rte_cryptodev *dev __rte_unused)
{
	CPT_PMD_INIT_FUNC_TRACE();
}
184 
185 static int
186 otx_cpt_que_pair_setup(struct rte_cryptodev *dev,
187 		       uint16_t que_pair_id,
188 		       const struct rte_cryptodev_qp_conf *qp_conf,
189 		       int socket_id __rte_unused,
190 		       struct rte_mempool *session_pool __rte_unused)
191 {
192 	void *cptvf = dev->data->dev_private;
193 	struct cpt_instance *instance = NULL;
194 	struct rte_pci_device *pci_dev;
195 	int ret = -1;
196 
197 	CPT_PMD_INIT_FUNC_TRACE();
198 
199 	if (dev->data->queue_pairs[que_pair_id] != NULL) {
200 		ret = otx_cpt_que_pair_release(dev, que_pair_id);
201 		if (ret)
202 			return ret;
203 	}
204 
205 	if (qp_conf->nb_descriptors > DEFAULT_CMD_QLEN) {
206 		CPT_LOG_INFO("Number of descriptors too big %d, using default "
207 			     "queue length of %d", qp_conf->nb_descriptors,
208 			     DEFAULT_CMD_QLEN);
209 	}
210 
211 	pci_dev = RTE_DEV_TO_PCI(dev->device);
212 
213 	if (pci_dev->mem_resource[0].addr == NULL) {
214 		CPT_LOG_ERR("PCI mem address null");
215 		return -EIO;
216 	}
217 
218 	ret = otx_cpt_get_resource(cptvf, 0, &instance);
219 	if (ret != 0) {
220 		CPT_LOG_ERR("Error getting instance handle from device %s : "
221 			    "ret = %d", dev->data->name, ret);
222 		return ret;
223 	}
224 
225 	instance->queue_id = que_pair_id;
226 	dev->data->queue_pairs[que_pair_id] = instance;
227 
228 	return 0;
229 }
230 
231 static int
232 otx_cpt_que_pair_release(struct rte_cryptodev *dev, uint16_t que_pair_id)
233 {
234 	struct cpt_instance *instance = dev->data->queue_pairs[que_pair_id];
235 	int ret;
236 
237 	CPT_PMD_INIT_FUNC_TRACE();
238 
239 	ret = otx_cpt_put_resource(instance);
240 	if (ret != 0) {
241 		CPT_LOG_ERR("Error putting instance handle of device %s : "
242 			    "ret = %d", dev->data->name, ret);
243 		return ret;
244 	}
245 
246 	dev->data->queue_pairs[que_pair_id] = NULL;
247 
248 	return 0;
249 }
250 
251 static unsigned int
252 otx_cpt_get_session_size(struct rte_cryptodev *dev __rte_unused)
253 {
254 	return cpt_get_session_size();
255 }
256 
257 static void
258 otx_cpt_session_init(void *sym_sess, uint8_t driver_id)
259 {
260 	struct rte_cryptodev_sym_session *sess = sym_sess;
261 	struct cpt_sess_misc *cpt_sess =
262 	 (struct cpt_sess_misc *) get_sym_session_private_data(sess, driver_id);
263 
264 	CPT_PMD_INIT_FUNC_TRACE();
265 	cpt_sess->ctx_dma_addr = rte_mempool_virt2iova(cpt_sess) +
266 			sizeof(struct cpt_sess_misc);
267 }
268 
269 static int
270 otx_cpt_session_cfg(struct rte_cryptodev *dev,
271 		    struct rte_crypto_sym_xform *xform,
272 		    struct rte_cryptodev_sym_session *sess,
273 		    struct rte_mempool *mempool)
274 {
275 	struct rte_crypto_sym_xform *chain;
276 	void *sess_private_data = NULL;
277 
278 	CPT_PMD_INIT_FUNC_TRACE();
279 
280 	if (cpt_is_algo_supported(xform))
281 		goto err;
282 
283 	if (unlikely(sess == NULL)) {
284 		CPT_LOG_ERR("invalid session struct");
285 		return -EINVAL;
286 	}
287 
288 	if (rte_mempool_get(mempool, &sess_private_data)) {
289 		CPT_LOG_ERR("Could not allocate sess_private_data");
290 		return -ENOMEM;
291 	}
292 
293 	chain = xform;
294 	while (chain) {
295 		switch (chain->type) {
296 		case RTE_CRYPTO_SYM_XFORM_AEAD:
297 			if (fill_sess_aead(chain, sess_private_data))
298 				goto err;
299 			break;
300 		case RTE_CRYPTO_SYM_XFORM_CIPHER:
301 			if (fill_sess_cipher(chain, sess_private_data))
302 				goto err;
303 			break;
304 		case RTE_CRYPTO_SYM_XFORM_AUTH:
305 			if (chain->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
306 				if (fill_sess_gmac(chain, sess_private_data))
307 					goto err;
308 			} else {
309 				if (fill_sess_auth(chain, sess_private_data))
310 					goto err;
311 			}
312 			break;
313 		default:
314 			CPT_LOG_ERR("Invalid crypto xform type");
315 			break;
316 		}
317 		chain = chain->next;
318 	}
319 	set_sym_session_private_data(sess, dev->driver_id, sess_private_data);
320 	otx_cpt_session_init(sess, dev->driver_id);
321 	return 0;
322 
323 err:
324 	if (sess_private_data)
325 		rte_mempool_put(mempool, sess_private_data);
326 	return -EPERM;
327 }
328 
329 static void
330 otx_cpt_session_clear(struct rte_cryptodev *dev,
331 		  struct rte_cryptodev_sym_session *sess)
332 {
333 	void *sess_priv = get_sym_session_private_data(sess, dev->driver_id);
334 
335 	CPT_PMD_INIT_FUNC_TRACE();
336 	if (sess_priv) {
337 		memset(sess_priv, 0, otx_cpt_get_session_size(dev));
338 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
339 		set_sym_session_private_data(sess, dev->driver_id, NULL);
340 		rte_mempool_put(sess_mp, sess_priv);
341 	}
342 }
343 
344 static uint16_t
345 otx_cpt_pkt_enqueue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
346 {
347 	struct cpt_instance *instance = (struct cpt_instance *)qptr;
348 	uint16_t count = 0;
349 	int ret;
350 	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
351 	struct pending_queue *pqueue = &cptvf->pqueue;
352 
353 	count = DEFAULT_CMD_QLEN - pqueue->pending_count;
354 	if (nb_ops > count)
355 		nb_ops = count;
356 
357 	count = 0;
358 	while (likely(count < nb_ops)) {
359 		ret = cpt_pmd_crypto_operation(instance, ops[count], pqueue,
360 						otx_cryptodev_driver_id);
361 		if (unlikely(ret))
362 			break;
363 		count++;
364 	}
365 	otx_cpt_ring_dbell(instance, count);
366 	return count;
367 }
368 
/*
 * Dequeue a burst of completed crypto ops from a queue pair.
 *
 * Each completion returned by cpt_dequeue_burst() is a response
 * descriptor read here as a uintptr_t array:
 *   rsp[0] - metabuf to return to the per-VF meta pool
 *   rsp[1] - the originating rte_crypto_op
 *   rsp[2] - address of generated auth data to verify, 0 if none
 *   rsp[3] - length of that auth data
 * NOTE(review): rsp[2]/rsp[3] semantics inferred from the
 * compl_auth_verify() call below - confirm against cpt_ucode.h.
 *
 * @return number of completed ops written back into @ops.
 */
static uint16_t
otx_cpt_pkt_dequeue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct cpt_instance *instance = (struct cpt_instance *)qptr;
	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
	struct pending_queue *pqueue = &cptvf->pqueue;
	uint16_t nb_completed, i = 0;
	/* VLA bounded by the caller's burst size */
	uint8_t compcode[nb_ops];

	nb_completed = cpt_dequeue_burst(instance, nb_ops,
					 (void **)ops, compcode, pqueue);
	while (likely(i < nb_completed)) {
		struct rte_crypto_op *cop;
		void *metabuf;
		uintptr_t *rsp;
		uint8_t status;

		rsp = (void *)ops[i];
		status = compcode[i];
		if (likely((i + 1) < nb_completed))
			rte_prefetch0(ops[i+1]);
		metabuf = (void *)rsp[0];
		cop = (void *)rsp[1];

		/* Replace the response descriptor with the crypto op */
		ops[i] = cop;

		if (likely(status == 0)) {
			if (likely(!rsp[2]))
				cop->status =
					RTE_CRYPTO_OP_STATUS_SUCCESS;
			else
				/* Verify-mode: compare generated digest */
				compl_auth_verify(cop, (uint8_t *)rsp[2],
						  rsp[3]);
		} else if (status == ERR_GC_ICV_MISCOMPARE) {
			/*auth data mismatch */
			cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
		} else {
			cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		free_op_meta(metabuf, cptvf->meta_info.cptvf_meta_pool);
		i++;
	}
	return nb_completed;
}
413 
/* Cryptodev operation table registered with the framework in
 * otx_cpt_dev_create().
 */
static struct rte_cryptodev_ops cptvf_ops = {
	/* Device related operations */
	.dev_configure = otx_cpt_dev_config,
	.dev_start = otx_cpt_dev_start,
	.dev_stop = otx_cpt_dev_stop,
	.dev_close = otx_cpt_dev_close,
	.dev_infos_get = otx_cpt_dev_info_get,

	.stats_get = otx_cpt_stats_get,
	.stats_reset = otx_cpt_stats_reset,
	.queue_pair_setup = otx_cpt_que_pair_setup,
	.queue_pair_release = otx_cpt_que_pair_release,
	/* Not implemented; framework falls back to its default */
	.queue_pair_count = NULL,

	/* Crypto related operations */
	.sym_session_get_size = otx_cpt_get_session_size,
	.sym_session_configure = otx_cpt_session_cfg,
	.sym_session_clear = otx_cpt_session_clear
};
433 
434 static void
435 otx_cpt_common_vars_init(struct cpt_vf *cptvf)
436 {
437 	cptvf->meta_info.cptvf_meta_pool = otx_cpt_meta_pool;
438 	cptvf->meta_info.cptvf_op_mlen = otx_cpt_op_mlen;
439 	cptvf->meta_info.cptvf_op_sb_mlen = otx_cpt_op_sb_mlen;
440 }
441 
442 int
443 otx_cpt_dev_create(struct rte_cryptodev *c_dev)
444 {
445 	struct rte_pci_device *pdev = RTE_DEV_TO_PCI(c_dev->device);
446 	struct cpt_vf *cptvf = NULL;
447 	void *reg_base;
448 	char dev_name[32];
449 	int ret;
450 
451 	if (pdev->mem_resource[0].phys_addr == 0ULL)
452 		return -EIO;
453 
454 	/* for secondary processes, we don't initialise any further as primary
455 	 * has already done this work.
456 	 */
457 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
458 		return 0;
459 
460 	cptvf = rte_zmalloc_socket("otx_cryptodev_private_mem",
461 			sizeof(struct cpt_vf), RTE_CACHE_LINE_SIZE,
462 			rte_socket_id());
463 
464 	if (cptvf == NULL) {
465 		CPT_LOG_ERR("Cannot allocate memory for device private data");
466 		return -ENOMEM;
467 	}
468 
469 	snprintf(dev_name, 32, "%02x:%02x.%x",
470 			pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
471 
472 	reg_base = pdev->mem_resource[0].addr;
473 	if (!reg_base) {
474 		CPT_LOG_ERR("Failed to map BAR0 of %s", dev_name);
475 		ret = -ENODEV;
476 		goto fail;
477 	}
478 
479 	ret = otx_cpt_hw_init(cptvf, pdev, reg_base, dev_name);
480 	if (ret) {
481 		CPT_LOG_ERR("Failed to init cptvf %s", dev_name);
482 		ret = -EIO;
483 		goto fail;
484 	}
485 
486 	/* Start off timer for mailbox interrupts */
487 	otx_cpt_periodic_alarm_start(cptvf);
488 
489 	rte_spinlock_lock(&otx_probe_count_lock);
490 	if (!otx_cryptodev_probe_count) {
491 		ret = init_global_resources();
492 		if (ret) {
493 			rte_spinlock_unlock(&otx_probe_count_lock);
494 			goto init_fail;
495 		}
496 	}
497 	otx_cryptodev_probe_count++;
498 	rte_spinlock_unlock(&otx_probe_count_lock);
499 
500 	/* Initialize data path variables used by common code */
501 	otx_cpt_common_vars_init(cptvf);
502 
503 	c_dev->dev_ops = &cptvf_ops;
504 
505 	c_dev->enqueue_burst = otx_cpt_pkt_enqueue;
506 	c_dev->dequeue_burst = otx_cpt_pkt_dequeue;
507 
508 	c_dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
509 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
510 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
511 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
512 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
513 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT;
514 
515 	/* Save dev private data */
516 	c_dev->data->dev_private = cptvf;
517 
518 	return 0;
519 
520 init_fail:
521 	otx_cpt_periodic_alarm_stop(cptvf);
522 	otx_cpt_deinit_device(cptvf);
523 
524 fail:
525 	if (cptvf) {
526 		/* Free private data allocated */
527 		rte_free(cptvf);
528 	}
529 
530 	return ret;
531 }
532