/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */
#include <string.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <dev_driver.h>
#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_reorder.h>
#include <rte_errno.h>

#include "scheduler_pmd_private.h"

/** Attach the workers predefined by the scheduler's EAL options */
static int
scheduler_attach_init_worker(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint8_t scheduler_id = dev->data->dev_id;
	int i;

	for (i = sched_ctx->nb_init_workers - 1; i >= 0; i--) {
		const char *dev_name = sched_ctx->init_worker_names[i];
		struct rte_cryptodev *worker_dev =
				rte_cryptodev_pmd_get_named_dev(dev_name);
		int status;

		if (!worker_dev) {
			CR_SCHED_LOG(ERR, "Failed to locate worker dev %s",
					dev_name);
			return -EINVAL;
		}

		status = rte_cryptodev_scheduler_worker_attach(
				scheduler_id, worker_dev->data->dev_id);

		if (status < 0) {
			CR_SCHED_LOG(ERR, "Failed to attach worker cryptodev %u",
					worker_dev->data->dev_id);
			return status;
		}

		CR_SCHED_LOG(INFO, "Scheduler %s attached worker %s",
				dev->data->name,
				sched_ctx->init_worker_names[i]);

		rte_free(sched_ctx->init_worker_names[i]);
		sched_ctx->init_worker_names[i] = NULL;

		sched_ctx->nb_init_workers -= 1;
	}

	return 0;
}

/** Configure device */
static int
scheduler_pmd_config(struct rte_cryptodev *dev,
		struct rte_cryptodev_config *config)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	/* scheduler_attach_init_worker() may be called several times, but
	 * only the first call does any real work; later calls find no
	 * pending init workers and return immediately.
	 */
	ret = scheduler_attach_init_worker(dev);
	if (ret < 0)
		return ret;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;

		ret = rte_cryptodev_configure(worker_dev_id, config);
		if (ret < 0)
			break;
	}

	return ret;
}

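/** Create or release the reorder ring of a queue pair, according to the
 * scheduler's reordering setting
 */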
static int
update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (sched_ctx->reordering_enabled) {
		char order_ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
		uint32_t buff_size = rte_align32pow2(
			sched_ctx->nb_workers * PER_WORKER_BUFF_SIZE);

		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}

		if (!buff_size)
			return 0;

		if (snprintf(order_ring_name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
			dev->data->dev_id, qp_id) < 0) {
			CR_SCHED_LOG(ERR, "failed to create unique reorder buffer name");
			return -ENOMEM;
		}

		qp_ctx->order_ring = rte_ring_create(order_ring_name,
				buff_size, rte_socket_id(),
				RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (!qp_ctx->order_ring) {
			CR_SCHED_LOG(ERR, "failed to create order ring");
			return -ENOMEM;
		}
	} else {
		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}
	}

	return 0;
}

/** Start device */
static int
scheduler_pmd_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	if (dev->data->dev_started)
		return 0;

	/* scheduler_attach_init_worker() may be called several times, but
	 * only the first call does any real work; later calls find no
	 * pending init workers and return immediately.
	 */
	ret = scheduler_attach_init_worker(dev);
	if (ret < 0)
		return ret;

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = update_order_ring(dev, i);
		if (ret < 0) {
			CR_SCHED_LOG(ERR, "Failed to update reorder buffer");
			return ret;
		}
	}

	if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) {
		CR_SCHED_LOG(ERR, "Scheduler mode is not set");
		return -1;
	}

	if (!sched_ctx->nb_workers) {
		CR_SCHED_LOG(ERR, "No worker in the scheduler");
		return -1;
	}

	if (*sched_ctx->ops.worker_attach == NULL)
		return -ENOTSUP;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;

		if ((*sched_ctx->ops.worker_attach)(dev, worker_dev_id) < 0) {
			CR_SCHED_LOG(ERR, "Failed to attach worker");
			return -ENOTSUP;
		}
	}

	if (*sched_ctx->ops.scheduler_start == NULL)
		return -ENOTSUP;

	if ((*sched_ctx->ops.scheduler_start)(dev) < 0) {
		CR_SCHED_LOG(ERR, "Scheduler start failed");
		return -1;
	}

	/* start all workers */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		ret = rte_cryptodev_start(worker_dev_id);
		if (ret < 0) {
			CR_SCHED_LOG(ERR, "Failed to start worker dev %u",
					worker_dev_id);
			return ret;
		}
	}

	return 0;
}

/** Stop device */
static void
scheduler_pmd_stop(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	if (!dev->data->dev_started)
		return;

	/* stop all workers first */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;

		rte_cryptodev_stop(worker_dev_id);
	}

	if (*sched_ctx->ops.scheduler_stop)
		(*sched_ctx->ops.scheduler_stop)(dev);

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;

		if (*sched_ctx->ops.worker_detach)
			(*sched_ctx->ops.worker_detach)(dev, worker_dev_id);
	}
}

/** Close device */
static int
scheduler_pmd_close(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	/* the dev should be stopped before being closed */
	if (dev->data->dev_started)
		return -EBUSY;

	/* close all workers first */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev *worker_dev =
				rte_cryptodev_pmd_get_dev(worker_dev_id);

		ret = (*worker_dev->dev_ops->dev_close)(worker_dev);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];

		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}

		if (qp_ctx->private_qp_ctx) {
			rte_free(qp_ctx->private_qp_ctx);
			qp_ctx->private_qp_ctx = NULL;
		}
	}

	if (sched_ctx->private_ctx) {
		rte_free(sched_ctx->private_ctx);
		sched_ctx->private_ctx = NULL;
	}

	if (sched_ctx->capabilities) {
		rte_free(sched_ctx->capabilities);
		sched_ctx->capabilities = NULL;
	}

	return 0;
}

/** Get device statistics */
static void
scheduler_pmd_stats_get(struct rte_cryptodev *dev,
	struct rte_cryptodev_stats *stats)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev *worker_dev =
				rte_cryptodev_pmd_get_dev(worker_dev_id);
		struct rte_cryptodev_stats worker_stats = {0};

		(*worker_dev->dev_ops->stats_get)(worker_dev, &worker_stats);

		stats->enqueued_count += worker_stats.enqueued_count;
		stats->dequeued_count += worker_stats.dequeued_count;

		stats->enqueue_err_count += worker_stats.enqueue_err_count;
		stats->dequeue_err_count += worker_stats.dequeue_err_count;
	}
}

/** Reset device statistics */
static void
scheduler_pmd_stats_reset(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev *worker_dev =
				rte_cryptodev_pmd_get_dev(worker_dev_id);

		(*worker_dev->dev_ops->stats_reset)(worker_dev);
	}
}

/** Get device info */
static void
scheduler_pmd_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t max_nb_sess = 0;
	uint16_t headroom_sz = 0;
	uint16_t tailroom_sz = 0;
	uint32_t i;

	if (!dev_info)
		return;

	/* scheduler_attach_init_worker() may be called several times, but
	 * only the first call does any real work; later calls find no
	 * pending init workers and return immediately.
	 */
	scheduler_attach_init_worker(dev);

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev_info worker_info;

		rte_cryptodev_info_get(worker_dev_id, &worker_info);
		uint32_t dev_max_sess = worker_info.sym.max_nb_sessions;
		if (dev_max_sess != 0) {
			if (max_nb_sess == 0 || dev_max_sess < max_nb_sess)
				max_nb_sess = worker_info.sym.max_nb_sessions;
		}

		/* Get the max headroom requirement among worker PMDs */
		headroom_sz = worker_info.min_mbuf_headroom_req >
				headroom_sz ?
				worker_info.min_mbuf_headroom_req :
				headroom_sz;

		/* Get the max tailroom requirement among worker PMDs */
		tailroom_sz = worker_info.min_mbuf_tailroom_req >
				tailroom_sz ?
				worker_info.min_mbuf_tailroom_req :
				tailroom_sz;
	}

	dev_info->driver_id = dev->driver_id;
	dev_info->feature_flags = dev->feature_flags;
	dev_info->capabilities = sched_ctx->capabilities;
	dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
	dev_info->min_mbuf_headroom_req = headroom_sz;
	dev_info->min_mbuf_tailroom_req = tailroom_sz;
	dev_info->sym.max_nb_sessions = max_nb_sess;
}

/** Release queue pair */
static int
scheduler_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (!qp_ctx)
		return 0;

	rte_ring_free(qp_ctx->order_ring);
	rte_free(qp_ctx->private_qp_ctx);

	rte_free(qp_ctx);
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
	const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx;
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
	uint32_t i;
	int ret;

	if (snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"CRYPTO_SCHE PMD %u QP %u",
			dev->data->dev_id, qp_id) < 0) {
		CR_SCHED_LOG(ERR, "Failed to create unique queue pair name");
		return -EFAULT;
	}

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		scheduler_pmd_qp_release(dev, qp_id);

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_id = sched_ctx->workers[i].dev_id;

		/*
		 * All workers will share the same session mempool
		 * for session-less operations, so the objects
		 * must be big enough for all the drivers used.
		 */
		ret = rte_cryptodev_queue_pair_setup(worker_id, qp_id,
				qp_conf, socket_id);
		if (ret < 0)
			return ret;
	}

	/* Allocate the queue pair data structure. */
	qp_ctx = rte_zmalloc_socket(name, sizeof(*qp_ctx), RTE_CACHE_LINE_SIZE,
			socket_id);
	if (qp_ctx == NULL)
		return -ENOMEM;

	/* The actual available object number = nb_descriptors - 1 */
	qp_ctx->max_nb_objs = qp_conf->nb_descriptors - 1;

	dev->data->queue_pairs[qp_id] = qp_ctx;

	/* scheduler_attach_init_worker() may be called several times, but
	 * only the first call does any real work; later calls find no
	 * pending init workers and return immediately.
	 */
	ret = scheduler_attach_init_worker(dev);
	if (ret < 0) {
		CR_SCHED_LOG(ERR, "Failed to attach worker");
		scheduler_pmd_qp_release(dev, qp_id);
		return ret;
	}

	if (*sched_ctx->ops.config_queue_pair) {
		if ((*sched_ctx->ops.config_queue_pair)(dev, qp_id) < 0) {
			CR_SCHED_LOG(ERR, "Unable to configure queue pair");
			return -1;
		}
	}

	return 0;
}

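/*
 * Illustrative sketch, not part of the driver: given the shared session
 * mempool requirement noted in scheduler_pmd_qp_setup() above, an
 * application would typically size the pool elements to the largest
 * private session size reported by any worker (the same maximum that
 * scheduler_pmd_sym_session_get_size() below computes), along the lines of:
 *
 *	uint32_t sz = 0, i;
 *
 *	for (i = 0; i < nb_workers; i++)
 *		sz = RTE_MAX(sz, rte_cryptodev_sym_get_private_session_size(
 *				worker_ids[i]));
 *	mp = rte_cryptodev_sym_session_pool_create("sched_sess_mp", nb_sess,
 *			sz, cache_size, 0, socket_id);
 *
 * worker_ids, nb_workers, nb_sess, cache_size and socket_id are
 * application-supplied placeholders.
 */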
static uint32_t
scheduler_pmd_sym_session_get_size(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint8_t i = 0;
	uint32_t max_priv_sess_size = 0;

	/* Check what is the maximum private session size for all workers */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev *worker_dev =
				&rte_cryptodevs[worker_dev_id];
		uint32_t priv_sess_size =
			(*worker_dev->dev_ops->sym_session_get_size)(worker_dev);

		if (max_priv_sess_size < priv_sess_size)
			max_priv_sess_size = priv_sess_size;
	}

	return max_priv_sess_size;
}

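/** Bookkeeping of the worker sessions created by one configure call, so
 * that workers sharing the same driver reuse a single session
 */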
struct scheduler_configured_sess_info {
	uint8_t dev_id;
	uint8_t driver_id;
	struct rte_cryptodev_sym_session *sess;
};

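/** Create a session on each attached worker for the given xform; workers
 * backed by the same driver share one worker session
 */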
static int
scheduler_pmd_sym_session_configure(struct rte_cryptodev *dev,
	struct rte_crypto_sym_xform *xform,
	struct rte_cryptodev_sym_session *sess)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct rte_mempool *mp = rte_mempool_from_obj(sess);
	struct scheduler_session_ctx *sess_ctx = (void *)sess->driver_priv_data;
	struct scheduler_configured_sess_info configured_sess[
			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS] = {{0}};
	uint32_t i, j, n_configured_sess = 0;
	int ret = 0;

	if (mp == NULL)
		return -EINVAL;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		struct scheduler_worker *worker = &sched_ctx->workers[i];
		struct rte_cryptodev_sym_session *worker_sess;
		uint8_t next_worker = 0;

		for (j = 0; j < n_configured_sess; j++) {
			if (configured_sess[j].driver_id ==
					worker->driver_id) {
				sess_ctx->worker_sess[i] =
					configured_sess[j].sess;
				next_worker = 1;
				break;
			}
		}
		if (next_worker)
			continue;

		if (rte_mempool_avail_count(mp) == 0) {
			ret = -ENOMEM;
			goto error_exit;
		}

		worker_sess = rte_cryptodev_sym_session_create(worker->dev_id,
			xform, mp);
		if (worker_sess == NULL) {
			ret = -rte_errno;
			goto error_exit;
		}

		worker_sess->opaque_data = (uint64_t)sess;
		sess_ctx->worker_sess[i] = worker_sess;
		configured_sess[n_configured_sess].driver_id =
			worker->driver_id;
		configured_sess[n_configured_sess].dev_id = worker->dev_id;
		configured_sess[n_configured_sess].sess = worker_sess;
		n_configured_sess++;
	}

	return 0;
error_exit:
	sess_ctx->ref_cnt = sched_ctx->ref_cnt;
	for (i = 0; i < n_configured_sess; i++)
		rte_cryptodev_sym_session_free(configured_sess[i].dev_id,
			configured_sess[i].sess);
	return ret;
}

/** Clear the memory of session so it doesn't leave key material behind */
static void
scheduler_pmd_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_session_ctx *sess_ctx = (void *)sess->driver_priv_data;
	struct scheduler_configured_sess_info deleted_sess[
			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS] = {{0}};
	uint32_t i, j, n_deleted_sess = 0;

	if (sched_ctx->ref_cnt != sess_ctx->ref_cnt) {
		CR_SCHED_LOG(WARNING,
			"Worker updated between session creation/deletion. "
			"The session may not be freed fully.");
	}

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		struct scheduler_worker *worker = &sched_ctx->workers[i];
		uint8_t next_worker = 0;

		for (j = 0; j < n_deleted_sess; j++) {
			if (deleted_sess[j].driver_id == worker->driver_id) {
				sess_ctx->worker_sess[i] = NULL;
				next_worker = 1;
				break;
			}
		}
		if (next_worker)
			continue;

		rte_cryptodev_sym_session_free(worker->dev_id,
			sess_ctx->worker_sess[i]);

		deleted_sess[n_deleted_sess++].driver_id = worker->driver_id;
		sess_ctx->worker_sess[i] = NULL;
	}
}

static struct rte_cryptodev_ops scheduler_pmd_ops = {
		.dev_configure		= scheduler_pmd_config,
		.dev_start		= scheduler_pmd_start,
		.dev_stop		= scheduler_pmd_stop,
		.dev_close		= scheduler_pmd_close,

		.stats_get		= scheduler_pmd_stats_get,
		.stats_reset		= scheduler_pmd_stats_reset,

		.dev_infos_get		= scheduler_pmd_info_get,

		.queue_pair_setup	= scheduler_pmd_qp_setup,
		.queue_pair_release	= scheduler_pmd_qp_release,

		.sym_session_get_size	= scheduler_pmd_sym_session_get_size,
		.sym_session_configure	= scheduler_pmd_sym_session_configure,
		.sym_session_clear	= scheduler_pmd_sym_session_clear,
};

struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops;