/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */
#include <string.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_dev.h>
#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_reorder.h>

#include "scheduler_pmd_private.h"

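/*
 * Workers can be pre-declared when the scheduler vdev is created on the
 * EAL command line, for example (illustrative only; the exact option
 * names depend on the DPDK version in use):
 *
 *   --vdev "crypto_scheduler,worker=crypto_aesni_mb0,worker=crypto_aesni_mb1,mode=round-robin"
 *
 * Those names end up in sched_ctx->init_worker_names and are consumed by
 * scheduler_attach_init_worker() below.
 */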
/** Attach the workers pre-declared by the scheduler's EAL options */
static int
scheduler_attach_init_worker(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint8_t scheduler_id = dev->data->dev_id;
	int i;

	for (i = sched_ctx->nb_init_workers - 1; i >= 0; i--) {
		const char *dev_name = sched_ctx->init_worker_names[i];
		struct rte_cryptodev *worker_dev =
				rte_cryptodev_pmd_get_named_dev(dev_name);
		int status;

		if (!worker_dev) {
			CR_SCHED_LOG(ERR, "Failed to locate worker dev %s",
					dev_name);
			return -EINVAL;
		}

		status = rte_cryptodev_scheduler_worker_attach(
				scheduler_id, worker_dev->data->dev_id);

		if (status < 0) {
			CR_SCHED_LOG(ERR, "Failed to attach worker cryptodev %u",
					worker_dev->data->dev_id);
			return status;
		}

		CR_SCHED_LOG(INFO, "Scheduler %s attached worker %s",
				dev->data->name,
				sched_ctx->init_worker_names[i]);

		rte_free(sched_ctx->init_worker_names[i]);
		sched_ctx->init_worker_names[i] = NULL;

		sched_ctx->nb_init_workers -= 1;
	}

	return 0;
}
/** Configure device */
static int
scheduler_pmd_config(struct rte_cryptodev *dev,
		struct rte_cryptodev_config *config)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	/* scheduler_attach_init_worker() is called from several entry
	 * points; once the pre-declared workers have been attached the
	 * list is empty, so repeated calls are harmless no-ops.
	 */
	ret = scheduler_attach_init_worker(dev);
	if (ret < 0)
		return ret;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;

		ret = rte_cryptodev_configure(worker_dev_id, config);
		if (ret < 0)
			break;
	}

	return ret;
}

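/*
 * (Re)create the per-queue-pair ordering ring used when crypto op
 * reordering is enabled.  The ring is sized to nb_workers *
 * PER_WORKER_BUFF_SIZE rounded up to the next power of two; when
 * reordering is disabled any existing ring is simply freed.
 */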
static int
update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (sched_ctx->reordering_enabled) {
		char order_ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
		uint32_t buff_size = rte_align32pow2(
			sched_ctx->nb_workers * PER_WORKER_BUFF_SIZE);

		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}

		if (!buff_size)
			return 0;

		if (snprintf(order_ring_name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
			dev->data->dev_id, qp_id) < 0) {
			CR_SCHED_LOG(ERR, "failed to create unique reorder buffer"
					" name");
			return -ENOMEM;
		}

		qp_ctx->order_ring = rte_ring_create(order_ring_name,
				buff_size, rte_socket_id(),
				RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (!qp_ctx->order_ring) {
			CR_SCHED_LOG(ERR, "failed to create order ring");
			return -ENOMEM;
		}
	} else {
		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}
	}

	return 0;
}

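/*
 * Device start sequence: attach any workers pre-declared via the EAL
 * options, refresh the per-qp ordering rings, let the active scheduling
 * mode attach to each worker, start the mode itself and finally start
 * every worker device.
 */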
/** Start device */
static int
scheduler_pmd_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	if (dev->data->dev_started)
		return 0;

	/* scheduler_attach_init_worker() is called from several entry
	 * points; once the pre-declared workers have been attached the
	 * list is empty, so repeated calls are harmless no-ops.
	 */
	ret = scheduler_attach_init_worker(dev);
	if (ret < 0)
		return ret;

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = update_order_ring(dev, i);
		if (ret < 0) {
			CR_SCHED_LOG(ERR, "Failed to update reorder buffer");
			return ret;
		}
	}

	if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) {
		CR_SCHED_LOG(ERR, "Scheduler mode is not set");
		return -1;
	}

	if (!sched_ctx->nb_workers) {
		CR_SCHED_LOG(ERR, "No worker in the scheduler");
		return -1;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.worker_attach, -ENOTSUP);

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;

		if ((*sched_ctx->ops.worker_attach)(dev, worker_dev_id) < 0) {
			CR_SCHED_LOG(ERR, "Failed to attach worker");
			return -ENOTSUP;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.scheduler_start, -ENOTSUP);

	if ((*sched_ctx->ops.scheduler_start)(dev) < 0) {
		CR_SCHED_LOG(ERR, "Scheduler start failed");
		return -1;
	}

	/* start all workers */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;

		ret = rte_cryptodev_start(worker_dev_id);
		if (ret < 0) {
			CR_SCHED_LOG(ERR, "Failed to start worker dev %u",
					worker_dev_id);
			return ret;
		}
	}

	return 0;
}

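/*
 * Stop mirrors start in reverse: the workers are stopped first, then the
 * scheduling mode is stopped and finally detached from each worker.
 */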
/** Stop device */
static void
scheduler_pmd_stop(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	if (!dev->data->dev_started)
		return;

	/* stop all workers first */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;

		rte_cryptodev_stop(worker_dev_id);
	}

	if (*sched_ctx->ops.scheduler_stop)
		(*sched_ctx->ops.scheduler_stop)(dev);

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;

		if (*sched_ctx->ops.worker_detach)
			(*sched_ctx->ops.worker_detach)(dev, worker_dev_id);
	}
}

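/*
 * Close releases everything the scheduler owns: the workers are closed,
 * the per-queue-pair ordering rings and private qp contexts are freed,
 * and the scheduler's own private context and capability array are
 * released.
 */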
/** Close device */
static int
scheduler_pmd_close(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	/* the dev should be stopped before being closed */
	if (dev->data->dev_started)
		return -EBUSY;

	/* close all workers first */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev *worker_dev =
				rte_cryptodev_pmd_get_dev(worker_dev_id);

		ret = (*worker_dev->dev_ops->dev_close)(worker_dev);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];

		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}

		if (qp_ctx->private_qp_ctx) {
			rte_free(qp_ctx->private_qp_ctx);
			qp_ctx->private_qp_ctx = NULL;
		}
	}

	if (sched_ctx->private_ctx) {
		rte_free(sched_ctx->private_ctx);
		sched_ctx->private_ctx = NULL;
	}

	if (sched_ctx->capabilities) {
		rte_free(sched_ctx->capabilities);
		sched_ctx->capabilities = NULL;
	}

	return 0;
}

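/*
 * Statistics are not kept by the scheduler itself: stats_get aggregates
 * each worker's counters and stats_reset simply resets every worker.
 */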
/** Get device statistics */
static void
scheduler_pmd_stats_get(struct rte_cryptodev *dev,
	struct rte_cryptodev_stats *stats)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev *worker_dev =
				rte_cryptodev_pmd_get_dev(worker_dev_id);
		struct rte_cryptodev_stats worker_stats = {0};

		(*worker_dev->dev_ops->stats_get)(worker_dev, &worker_stats);

		stats->enqueued_count += worker_stats.enqueued_count;
		stats->dequeued_count += worker_stats.dequeued_count;

		stats->enqueue_err_count += worker_stats.enqueue_err_count;
		stats->dequeue_err_count += worker_stats.dequeue_err_count;
	}
}

/** Reset device statistics */
static void
scheduler_pmd_stats_reset(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev *worker_dev =
				rte_cryptodev_pmd_get_dev(worker_dev_id);

		(*worker_dev->dev_ops->stats_reset)(worker_dev);
	}
}

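/*
 * The advertised device info is derived from the workers: the smallest
 * non-zero session limit and the largest mbuf headroom/tailroom
 * requirements are reported, so anything valid for the scheduler is
 * valid for every worker.
 */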
/** Get device info */
static void
scheduler_pmd_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t max_nb_sess = 0;
	uint16_t headroom_sz = 0;
	uint16_t tailroom_sz = 0;
	uint32_t i;

	if (!dev_info)
		return;

	/* scheduler_attach_init_worker() is called from several entry
	 * points; once the pre-declared workers have been attached the
	 * list is empty, so repeated calls are harmless no-ops.
	 */
	scheduler_attach_init_worker(dev);

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev_info worker_info;

		rte_cryptodev_info_get(worker_dev_id, &worker_info);
		uint32_t dev_max_sess = worker_info.sym.max_nb_sessions;
		if (dev_max_sess != 0) {
			if (max_nb_sess == 0 || dev_max_sess < max_nb_sess)
				max_nb_sess = worker_info.sym.max_nb_sessions;
		}

		/* Get the max headroom requirement among worker PMDs */
		headroom_sz = worker_info.min_mbuf_headroom_req >
				headroom_sz ?
				worker_info.min_mbuf_headroom_req :
				headroom_sz;

		/* Get the max tailroom requirement among worker PMDs */
		tailroom_sz = worker_info.min_mbuf_tailroom_req >
				tailroom_sz ?
				worker_info.min_mbuf_tailroom_req :
				tailroom_sz;
	}

	dev_info->driver_id = dev->driver_id;
	dev_info->feature_flags = dev->feature_flags;
	dev_info->capabilities = sched_ctx->capabilities;
	dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
	dev_info->min_mbuf_headroom_req = headroom_sz;
	dev_info->min_mbuf_tailroom_req = tailroom_sz;
	dev_info->sym.max_nb_sessions = max_nb_sess;
}

/** Release queue pair */
static int
scheduler_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (!qp_ctx)
		return 0;

	if (qp_ctx->order_ring)
		rte_ring_free(qp_ctx->order_ring);
	if (qp_ctx->private_qp_ctx)
		rte_free(qp_ctx->private_qp_ctx);

	rte_free(qp_ctx);
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

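/*
 * Queue pair setup configures the same qp_id on every worker with the
 * caller's qp_conf before allocating the scheduler's own per-qp context,
 * so a burst enqueued on the scheduler's queue pair can be dispatched to
 * the matching queue pair of any worker.
 */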
/** Setup a queue pair */
static int
scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
	const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx;
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
	uint32_t i;
	int ret;

	if (snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"CRYPTO_SCHE PMD %u QP %u",
			dev->data->dev_id, qp_id) < 0) {
		CR_SCHED_LOG(ERR, "Failed to create unique queue pair name");
		return -EFAULT;
	}

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		scheduler_pmd_qp_release(dev, qp_id);

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_id = sched_ctx->workers[i].dev_id;

		/*
		 * All workers will share the same session mempool
		 * for session-less operations, so the objects
		 * must be big enough for all the drivers used.
		 */
		ret = rte_cryptodev_queue_pair_setup(worker_id, qp_id,
				qp_conf, socket_id);
		if (ret < 0)
			return ret;
	}

	/* Allocate the queue pair data structure. */
	qp_ctx = rte_zmalloc_socket(name, sizeof(*qp_ctx), RTE_CACHE_LINE_SIZE,
			socket_id);
	if (qp_ctx == NULL)
		return -ENOMEM;

	/* The actual available object number = nb_descriptors - 1 */
	qp_ctx->max_nb_objs = qp_conf->nb_descriptors - 1;

	dev->data->queue_pairs[qp_id] = qp_ctx;

	/* scheduler_attach_init_worker() is called from several entry
	 * points; once the pre-declared workers have been attached the
	 * list is empty, so repeated calls are harmless no-ops.
	 */
	ret = scheduler_attach_init_worker(dev);
	if (ret < 0) {
		CR_SCHED_LOG(ERR, "Failed to attach worker");
		scheduler_pmd_qp_release(dev, qp_id);
		return ret;
	}

	if (*sched_ctx->ops.config_queue_pair) {
		if ((*sched_ctx->ops.config_queue_pair)(dev, qp_id) < 0) {
			CR_SCHED_LOG(ERR, "Unable to configure queue pair");
			return -1;
		}
	}

	return 0;
}

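/*
 * A single rte_cryptodev_sym_session is shared by all workers, so the
 * session size reported to the application has to be the largest private
 * session size found among them; the same session is then initialised on
 * every worker in sym_session_configure() below.
 */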
static uint32_t
scheduler_pmd_sym_session_get_size(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint8_t i = 0;
	uint32_t max_priv_sess_size = 0;

	/* Check what is the maximum private session size for all workers */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev *worker_dev =
				&rte_cryptodevs[worker_dev_id];
		uint32_t priv_sess_size =
			(*worker_dev->dev_ops->sym_session_get_size)(
					worker_dev);

		if (max_priv_sess_size < priv_sess_size)
			max_priv_sess_size = priv_sess_size;
	}

	return max_priv_sess_size;
}

static int
scheduler_pmd_sym_session_configure(struct rte_cryptodev *dev,
	struct rte_crypto_sym_xform *xform,
	struct rte_cryptodev_sym_session *sess,
	struct rte_mempool *mempool)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		struct scheduler_worker *worker = &sched_ctx->workers[i];

		ret = rte_cryptodev_sym_session_init(worker->dev_id, sess,
					xform, mempool);
		if (ret < 0) {
			CR_SCHED_LOG(ERR, "unable to config sym session");
			return ret;
		}
	}

	return 0;
}

/** Clear the memory of session so it doesn't leave key material behind */
static void
scheduler_pmd_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	/* Clear private data of workers */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		struct scheduler_worker *worker = &sched_ctx->workers[i];

		rte_cryptodev_sym_session_clear(worker->dev_id, sess);
	}
}

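/*
 * Op table handed to the cryptodev framework.  The exported pointer below
 * is presumably installed as the device's dev_ops by the scheduler vdev
 * creation code outside this file.
 */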
static struct rte_cryptodev_ops scheduler_pmd_ops = {
		.dev_configure		= scheduler_pmd_config,
		.dev_start		= scheduler_pmd_start,
		.dev_stop		= scheduler_pmd_stop,
		.dev_close		= scheduler_pmd_close,

		.stats_get		= scheduler_pmd_stats_get,
		.stats_reset		= scheduler_pmd_stats_reset,

		.dev_infos_get		= scheduler_pmd_info_get,

		.queue_pair_setup	= scheduler_pmd_qp_setup,
		.queue_pair_release	= scheduler_pmd_qp_release,

		.sym_session_get_size	= scheduler_pmd_sym_session_get_size,
		.sym_session_configure	= scheduler_pmd_sym_session_configure,
		.sym_session_clear	= scheduler_pmd_sym_session_clear,
};

struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops;