xref: /dpdk/drivers/crypto/scheduler/scheduler_pmd_ops.c (revision 5566a3e35866ce9e5eacf886c27b460ebfcd6ee9)
1*5566a3e3SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
2*5566a3e3SBruce Richardson  * Copyright(c) 2017 Intel Corporation
357523e68SFan Zhang  */
457523e68SFan Zhang #include <string.h>
557523e68SFan Zhang 
657523e68SFan Zhang #include <rte_common.h>
757523e68SFan Zhang #include <rte_malloc.h>
857523e68SFan Zhang #include <rte_dev.h>
957523e68SFan Zhang #include <rte_cryptodev.h>
1057523e68SFan Zhang #include <rte_cryptodev_pmd.h>
1157523e68SFan Zhang #include <rte_reorder.h>
1257523e68SFan Zhang 
1357523e68SFan Zhang #include "scheduler_pmd_private.h"
1457523e68SFan Zhang 
1550e14527SFan Zhang /** attaching the slaves predefined by scheduler's EAL options */
1650e14527SFan Zhang static int
1750e14527SFan Zhang scheduler_attach_init_slave(struct rte_cryptodev *dev)
1850e14527SFan Zhang {
1950e14527SFan Zhang 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
2050e14527SFan Zhang 	uint8_t scheduler_id = dev->data->dev_id;
2150e14527SFan Zhang 	int i;
2250e14527SFan Zhang 
2350e14527SFan Zhang 	for (i = sched_ctx->nb_init_slaves - 1; i >= 0; i--) {
2450e14527SFan Zhang 		const char *dev_name = sched_ctx->init_slave_names[i];
2550e14527SFan Zhang 		struct rte_cryptodev *slave_dev =
2650e14527SFan Zhang 				rte_cryptodev_pmd_get_named_dev(dev_name);
2750e14527SFan Zhang 		int status;
2850e14527SFan Zhang 
2950e14527SFan Zhang 		if (!slave_dev) {
3050e14527SFan Zhang 			CS_LOG_ERR("Failed to locate slave dev %s",
3150e14527SFan Zhang 					dev_name);
3250e14527SFan Zhang 			return -EINVAL;
3350e14527SFan Zhang 		}
3450e14527SFan Zhang 
3550e14527SFan Zhang 		status = rte_cryptodev_scheduler_slave_attach(
3650e14527SFan Zhang 				scheduler_id, slave_dev->data->dev_id);
3750e14527SFan Zhang 
3850e14527SFan Zhang 		if (status < 0) {
3950e14527SFan Zhang 			CS_LOG_ERR("Failed to attach slave cryptodev %u",
4050e14527SFan Zhang 					slave_dev->data->dev_id);
4150e14527SFan Zhang 			return status;
4250e14527SFan Zhang 		}
4350e14527SFan Zhang 
4450e14527SFan Zhang 		CS_LOG_INFO("Scheduler %s attached slave %s\n",
4550e14527SFan Zhang 				dev->data->name,
4650e14527SFan Zhang 				sched_ctx->init_slave_names[i]);
4750e14527SFan Zhang 
4850e14527SFan Zhang 		rte_free(sched_ctx->init_slave_names[i]);
4950e14527SFan Zhang 
5050e14527SFan Zhang 		sched_ctx->nb_init_slaves -= 1;
5150e14527SFan Zhang 	}
5250e14527SFan Zhang 
5350e14527SFan Zhang 	return 0;
5450e14527SFan Zhang }
5557523e68SFan Zhang /** Configure device */
5657523e68SFan Zhang static int
5760e686c2SFan Zhang scheduler_pmd_config(struct rte_cryptodev *dev,
58b3bbd9e5SSlawomir Mrozowicz 		struct rte_cryptodev_config *config)
5957523e68SFan Zhang {
60b3bbd9e5SSlawomir Mrozowicz 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
61b3bbd9e5SSlawomir Mrozowicz 	uint32_t i;
6250e14527SFan Zhang 	int ret;
6350e14527SFan Zhang 
6450e14527SFan Zhang 	/* although scheduler_attach_init_slave presents multiple times,
6550e14527SFan Zhang 	 * there will be only 1 meaningful execution.
6650e14527SFan Zhang 	 */
6750e14527SFan Zhang 	ret = scheduler_attach_init_slave(dev);
6850e14527SFan Zhang 	if (ret < 0)
6950e14527SFan Zhang 		return ret;
7057523e68SFan Zhang 
71b3bbd9e5SSlawomir Mrozowicz 	for (i = 0; i < sched_ctx->nb_slaves; i++) {
72b3bbd9e5SSlawomir Mrozowicz 		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
73b3bbd9e5SSlawomir Mrozowicz 
74f7db6f82SPablo de Lara 		ret = rte_cryptodev_configure(slave_dev_id, config);
75b3bbd9e5SSlawomir Mrozowicz 		if (ret < 0)
76b3bbd9e5SSlawomir Mrozowicz 			break;
77b3bbd9e5SSlawomir Mrozowicz 	}
78b3bbd9e5SSlawomir Mrozowicz 
7957523e68SFan Zhang 	return ret;
8057523e68SFan Zhang }
8157523e68SFan Zhang 
8257523e68SFan Zhang static int
838a48e039SFan Zhang update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id)
8457523e68SFan Zhang {
8557523e68SFan Zhang 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
8657523e68SFan Zhang 	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
8757523e68SFan Zhang 
8857523e68SFan Zhang 	if (sched_ctx->reordering_enabled) {
898a48e039SFan Zhang 		char order_ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
908a48e039SFan Zhang 		uint32_t buff_size = rte_align32pow2(
918a48e039SFan Zhang 			sched_ctx->nb_slaves * PER_SLAVE_BUFF_SIZE);
9257523e68SFan Zhang 
938a48e039SFan Zhang 		if (qp_ctx->order_ring) {
948a48e039SFan Zhang 			rte_ring_free(qp_ctx->order_ring);
958a48e039SFan Zhang 			qp_ctx->order_ring = NULL;
9657523e68SFan Zhang 		}
9757523e68SFan Zhang 
9857523e68SFan Zhang 		if (!buff_size)
9957523e68SFan Zhang 			return 0;
10057523e68SFan Zhang 
1018a48e039SFan Zhang 		if (snprintf(order_ring_name, RTE_CRYPTODEV_NAME_MAX_LEN,
10257523e68SFan Zhang 			"%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
10357523e68SFan Zhang 			dev->data->dev_id, qp_id) < 0) {
10457523e68SFan Zhang 			CS_LOG_ERR("failed to create unique reorder buffer "
10557523e68SFan Zhang 					"name");
10657523e68SFan Zhang 			return -ENOMEM;
10757523e68SFan Zhang 		}
10857523e68SFan Zhang 
1098a48e039SFan Zhang 		qp_ctx->order_ring = rte_ring_create(order_ring_name,
1108a48e039SFan Zhang 				buff_size, rte_socket_id(),
1118a48e039SFan Zhang 				RING_F_SP_ENQ | RING_F_SC_DEQ);
1128a48e039SFan Zhang 		if (!qp_ctx->order_ring) {
1138a48e039SFan Zhang 			CS_LOG_ERR("failed to create order ring");
11457523e68SFan Zhang 			return -ENOMEM;
11557523e68SFan Zhang 		}
11657523e68SFan Zhang 	} else {
1178a48e039SFan Zhang 		if (qp_ctx->order_ring) {
1188a48e039SFan Zhang 			rte_ring_free(qp_ctx->order_ring);
1198a48e039SFan Zhang 			qp_ctx->order_ring = NULL;
12057523e68SFan Zhang 		}
12157523e68SFan Zhang 	}
12257523e68SFan Zhang 
12357523e68SFan Zhang 	return 0;
12457523e68SFan Zhang }
12557523e68SFan Zhang 
/** Start device */
static int
scheduler_pmd_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	/* idempotent: nothing to do if the device is already running */
	if (dev->data->dev_started)
		return 0;

	/* although scheduler_attach_init_slave presents multiple times,
	 * there will be only 1 meaningful execution.
	 */
	ret = scheduler_attach_init_slave(dev);
	if (ret < 0)
		return ret;

	/* (re)build each queue pair's reorder ring, which depends on the
	 * current slave count and reordering setting
	 */
	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = update_order_ring(dev, i);
		if (ret < 0) {
			CS_LOG_ERR("Failed to update reorder buffer");
			return ret;
		}
	}

	if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) {
		CS_LOG_ERR("Scheduler mode is not set");
		return -1;
	}

	if (!sched_ctx->nb_slaves) {
		CS_LOG_ERR("No slave in the scheduler");
		return -1;
	}

	/* the scheduling mode must hook up every slave before starting */
	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.slave_attach, -ENOTSUP);

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;

		if ((*sched_ctx->ops.slave_attach)(dev, slave_dev_id) < 0) {
			CS_LOG_ERR("Failed to attach slave");
			return -ENOTSUP;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.scheduler_start, -ENOTSUP);

	if ((*sched_ctx->ops.scheduler_start)(dev) < 0) {
		CS_LOG_ERR("Scheduler start failed");
		return -1;
	}

	/* start all slaves */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		ret = (*slave_dev->dev_ops->dev_start)(slave_dev);
		if (ret < 0) {
			/* NOTE(review): slaves started earlier in this loop
			 * are left running on failure — no rollback here;
			 * confirm whether callers are expected to stop them.
			 */
			CS_LOG_ERR("Failed to start slave dev %u",
					slave_dev_id);
			return ret;
		}
	}

	return 0;
}
19657523e68SFan Zhang 
19757523e68SFan Zhang /** Stop device */
19857523e68SFan Zhang static void
19957523e68SFan Zhang scheduler_pmd_stop(struct rte_cryptodev *dev)
20057523e68SFan Zhang {
20157523e68SFan Zhang 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
20257523e68SFan Zhang 	uint32_t i;
20357523e68SFan Zhang 
20457523e68SFan Zhang 	if (!dev->data->dev_started)
20557523e68SFan Zhang 		return;
20657523e68SFan Zhang 
20757523e68SFan Zhang 	/* stop all slaves first */
20857523e68SFan Zhang 	for (i = 0; i < sched_ctx->nb_slaves; i++) {
20957523e68SFan Zhang 		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
21057523e68SFan Zhang 		struct rte_cryptodev *slave_dev =
21157523e68SFan Zhang 				rte_cryptodev_pmd_get_dev(slave_dev_id);
21257523e68SFan Zhang 
21357523e68SFan Zhang 		(*slave_dev->dev_ops->dev_stop)(slave_dev);
21457523e68SFan Zhang 	}
21557523e68SFan Zhang 
21657523e68SFan Zhang 	if (*sched_ctx->ops.scheduler_stop)
21757523e68SFan Zhang 		(*sched_ctx->ops.scheduler_stop)(dev);
21857523e68SFan Zhang 
21957523e68SFan Zhang 	for (i = 0; i < sched_ctx->nb_slaves; i++) {
22057523e68SFan Zhang 		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
22157523e68SFan Zhang 
22257523e68SFan Zhang 		if (*sched_ctx->ops.slave_detach)
22357523e68SFan Zhang 			(*sched_ctx->ops.slave_detach)(dev, slave_dev_id);
22457523e68SFan Zhang 	}
22557523e68SFan Zhang }
22657523e68SFan Zhang 
22757523e68SFan Zhang /** Close device */
22857523e68SFan Zhang static int
22957523e68SFan Zhang scheduler_pmd_close(struct rte_cryptodev *dev)
23057523e68SFan Zhang {
23157523e68SFan Zhang 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
23257523e68SFan Zhang 	uint32_t i;
23357523e68SFan Zhang 	int ret;
23457523e68SFan Zhang 
23557523e68SFan Zhang 	/* the dev should be stopped before being closed */
23657523e68SFan Zhang 	if (dev->data->dev_started)
23757523e68SFan Zhang 		return -EBUSY;
23857523e68SFan Zhang 
23957523e68SFan Zhang 	/* close all slaves first */
24057523e68SFan Zhang 	for (i = 0; i < sched_ctx->nb_slaves; i++) {
24157523e68SFan Zhang 		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
24257523e68SFan Zhang 		struct rte_cryptodev *slave_dev =
24357523e68SFan Zhang 				rte_cryptodev_pmd_get_dev(slave_dev_id);
24457523e68SFan Zhang 
24557523e68SFan Zhang 		ret = (*slave_dev->dev_ops->dev_close)(slave_dev);
24657523e68SFan Zhang 		if (ret < 0)
24757523e68SFan Zhang 			return ret;
24857523e68SFan Zhang 	}
24957523e68SFan Zhang 
25057523e68SFan Zhang 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
25157523e68SFan Zhang 		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
25257523e68SFan Zhang 
2538a48e039SFan Zhang 		if (qp_ctx->order_ring) {
2548a48e039SFan Zhang 			rte_ring_free(qp_ctx->order_ring);
2558a48e039SFan Zhang 			qp_ctx->order_ring = NULL;
25657523e68SFan Zhang 		}
25757523e68SFan Zhang 
25857523e68SFan Zhang 		if (qp_ctx->private_qp_ctx) {
25957523e68SFan Zhang 			rte_free(qp_ctx->private_qp_ctx);
26057523e68SFan Zhang 			qp_ctx->private_qp_ctx = NULL;
26157523e68SFan Zhang 		}
26257523e68SFan Zhang 	}
26357523e68SFan Zhang 
26457523e68SFan Zhang 	if (sched_ctx->private_ctx)
26557523e68SFan Zhang 		rte_free(sched_ctx->private_ctx);
26657523e68SFan Zhang 
26757523e68SFan Zhang 	if (sched_ctx->capabilities)
26857523e68SFan Zhang 		rte_free(sched_ctx->capabilities);
26957523e68SFan Zhang 
27057523e68SFan Zhang 	return 0;
27157523e68SFan Zhang }
27257523e68SFan Zhang 
27357523e68SFan Zhang /** Get device statistics */
27457523e68SFan Zhang static void
27557523e68SFan Zhang scheduler_pmd_stats_get(struct rte_cryptodev *dev,
27657523e68SFan Zhang 	struct rte_cryptodev_stats *stats)
27757523e68SFan Zhang {
27857523e68SFan Zhang 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
27957523e68SFan Zhang 	uint32_t i;
28057523e68SFan Zhang 
28157523e68SFan Zhang 	for (i = 0; i < sched_ctx->nb_slaves; i++) {
28257523e68SFan Zhang 		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
28357523e68SFan Zhang 		struct rte_cryptodev *slave_dev =
28457523e68SFan Zhang 				rte_cryptodev_pmd_get_dev(slave_dev_id);
28557523e68SFan Zhang 		struct rte_cryptodev_stats slave_stats = {0};
28657523e68SFan Zhang 
28757523e68SFan Zhang 		(*slave_dev->dev_ops->stats_get)(slave_dev, &slave_stats);
28857523e68SFan Zhang 
28957523e68SFan Zhang 		stats->enqueued_count += slave_stats.enqueued_count;
29057523e68SFan Zhang 		stats->dequeued_count += slave_stats.dequeued_count;
29157523e68SFan Zhang 
29257523e68SFan Zhang 		stats->enqueue_err_count += slave_stats.enqueue_err_count;
29357523e68SFan Zhang 		stats->dequeue_err_count += slave_stats.dequeue_err_count;
29457523e68SFan Zhang 	}
29557523e68SFan Zhang }
29657523e68SFan Zhang 
29757523e68SFan Zhang /** Reset device statistics */
29857523e68SFan Zhang static void
29957523e68SFan Zhang scheduler_pmd_stats_reset(struct rte_cryptodev *dev)
30057523e68SFan Zhang {
30157523e68SFan Zhang 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
30257523e68SFan Zhang 	uint32_t i;
30357523e68SFan Zhang 
30457523e68SFan Zhang 	for (i = 0; i < sched_ctx->nb_slaves; i++) {
30557523e68SFan Zhang 		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
30657523e68SFan Zhang 		struct rte_cryptodev *slave_dev =
30757523e68SFan Zhang 				rte_cryptodev_pmd_get_dev(slave_dev_id);
30857523e68SFan Zhang 
30957523e68SFan Zhang 		(*slave_dev->dev_ops->stats_reset)(slave_dev);
31057523e68SFan Zhang 	}
31157523e68SFan Zhang }
31257523e68SFan Zhang 
31357523e68SFan Zhang /** Get device info */
31457523e68SFan Zhang static void
31557523e68SFan Zhang scheduler_pmd_info_get(struct rte_cryptodev *dev,
31657523e68SFan Zhang 		struct rte_cryptodev_info *dev_info)
31757523e68SFan Zhang {
31857523e68SFan Zhang 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
31957523e68SFan Zhang 	uint32_t max_nb_sessions = sched_ctx->nb_slaves ?
320f2f020d2SDeclan Doherty 			UINT32_MAX : RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS;
32157523e68SFan Zhang 	uint32_t i;
32257523e68SFan Zhang 
32357523e68SFan Zhang 	if (!dev_info)
32457523e68SFan Zhang 		return;
32557523e68SFan Zhang 
32650e14527SFan Zhang 	/* although scheduler_attach_init_slave presents multiple times,
32750e14527SFan Zhang 	 * there will be only 1 meaningful execution.
32850e14527SFan Zhang 	 */
32950e14527SFan Zhang 	scheduler_attach_init_slave(dev);
33050e14527SFan Zhang 
33157523e68SFan Zhang 	for (i = 0; i < sched_ctx->nb_slaves; i++) {
33257523e68SFan Zhang 		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
33357523e68SFan Zhang 		struct rte_cryptodev_info slave_info;
33457523e68SFan Zhang 
33557523e68SFan Zhang 		rte_cryptodev_info_get(slave_dev_id, &slave_info);
33657523e68SFan Zhang 		max_nb_sessions = slave_info.sym.max_nb_sessions <
33757523e68SFan Zhang 				max_nb_sessions ?
33857523e68SFan Zhang 				slave_info.sym.max_nb_sessions :
33957523e68SFan Zhang 				max_nb_sessions;
34057523e68SFan Zhang 	}
34157523e68SFan Zhang 
3427a364faeSSlawomir Mrozowicz 	dev_info->driver_id = dev->driver_id;
34357523e68SFan Zhang 	dev_info->feature_flags = dev->feature_flags;
34457523e68SFan Zhang 	dev_info->capabilities = sched_ctx->capabilities;
34557523e68SFan Zhang 	dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
34657523e68SFan Zhang 	dev_info->sym.max_nb_sessions = max_nb_sessions;
34757523e68SFan Zhang }
34857523e68SFan Zhang 
34957523e68SFan Zhang /** Release queue pair */
35057523e68SFan Zhang static int
35157523e68SFan Zhang scheduler_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
35257523e68SFan Zhang {
35357523e68SFan Zhang 	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
35457523e68SFan Zhang 
35557523e68SFan Zhang 	if (!qp_ctx)
35657523e68SFan Zhang 		return 0;
35757523e68SFan Zhang 
3588a48e039SFan Zhang 	if (qp_ctx->order_ring)
3598a48e039SFan Zhang 		rte_ring_free(qp_ctx->order_ring);
36057523e68SFan Zhang 	if (qp_ctx->private_qp_ctx)
36157523e68SFan Zhang 		rte_free(qp_ctx->private_qp_ctx);
36257523e68SFan Zhang 
36357523e68SFan Zhang 	rte_free(qp_ctx);
36457523e68SFan Zhang 	dev->data->queue_pairs[qp_id] = NULL;
36557523e68SFan Zhang 
36657523e68SFan Zhang 	return 0;
36757523e68SFan Zhang }
36857523e68SFan Zhang 
36957523e68SFan Zhang /** Setup a queue pair */
37057523e68SFan Zhang static int
37157523e68SFan Zhang scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
372f7db6f82SPablo de Lara 	const struct rte_cryptodev_qp_conf *qp_conf, int socket_id,
373f7db6f82SPablo de Lara 	struct rte_mempool *session_pool)
37457523e68SFan Zhang {
37557523e68SFan Zhang 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
37657523e68SFan Zhang 	struct scheduler_qp_ctx *qp_ctx;
37757523e68SFan Zhang 	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
378c281019bSFan Zhang 	uint32_t i;
379c281019bSFan Zhang 	int ret;
38057523e68SFan Zhang 
38157523e68SFan Zhang 	if (snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
38257523e68SFan Zhang 			"CRYTO_SCHE PMD %u QP %u",
38357523e68SFan Zhang 			dev->data->dev_id, qp_id) < 0) {
38457523e68SFan Zhang 		CS_LOG_ERR("Failed to create unique queue pair name");
38557523e68SFan Zhang 		return -EFAULT;
38657523e68SFan Zhang 	}
38757523e68SFan Zhang 
38857523e68SFan Zhang 	/* Free memory prior to re-allocation if needed. */
38957523e68SFan Zhang 	if (dev->data->queue_pairs[qp_id] != NULL)
39057523e68SFan Zhang 		scheduler_pmd_qp_release(dev, qp_id);
39157523e68SFan Zhang 
392c281019bSFan Zhang 	for (i = 0; i < sched_ctx->nb_slaves; i++) {
393c281019bSFan Zhang 		uint8_t slave_id = sched_ctx->slaves[i].dev_id;
394c281019bSFan Zhang 
395f7db6f82SPablo de Lara 		/*
396f7db6f82SPablo de Lara 		 * All slaves will share the same session mempool
397f7db6f82SPablo de Lara 		 * for session-less operations, so the objects
398f7db6f82SPablo de Lara 		 * must be big enough for all the drivers used.
399f7db6f82SPablo de Lara 		 */
400c281019bSFan Zhang 		ret = rte_cryptodev_queue_pair_setup(slave_id, qp_id,
401f7db6f82SPablo de Lara 				qp_conf, socket_id, session_pool);
402c281019bSFan Zhang 		if (ret < 0)
403c281019bSFan Zhang 			return ret;
404c281019bSFan Zhang 	}
405c281019bSFan Zhang 
40657523e68SFan Zhang 	/* Allocate the queue pair data structure. */
40757523e68SFan Zhang 	qp_ctx = rte_zmalloc_socket(name, sizeof(*qp_ctx), RTE_CACHE_LINE_SIZE,
40857523e68SFan Zhang 			socket_id);
40957523e68SFan Zhang 	if (qp_ctx == NULL)
41057523e68SFan Zhang 		return -ENOMEM;
41157523e68SFan Zhang 
41288405476SFan Zhang 	/* The actual available object number = nb_descriptors - 1 */
41388405476SFan Zhang 	qp_ctx->max_nb_objs = qp_conf->nb_descriptors - 1;
41488405476SFan Zhang 
41557523e68SFan Zhang 	dev->data->queue_pairs[qp_id] = qp_ctx;
41657523e68SFan Zhang 
41750e14527SFan Zhang 	/* although scheduler_attach_init_slave presents multiple times,
41850e14527SFan Zhang 	 * there will be only 1 meaningful execution.
41950e14527SFan Zhang 	 */
42050e14527SFan Zhang 	ret = scheduler_attach_init_slave(dev);
42150e14527SFan Zhang 	if (ret < 0) {
42250e14527SFan Zhang 		CS_LOG_ERR("Failed to attach slave");
42350e14527SFan Zhang 		scheduler_pmd_qp_release(dev, qp_id);
42450e14527SFan Zhang 		return ret;
42550e14527SFan Zhang 	}
42650e14527SFan Zhang 
42757523e68SFan Zhang 	if (*sched_ctx->ops.config_queue_pair) {
42857523e68SFan Zhang 		if ((*sched_ctx->ops.config_queue_pair)(dev, qp_id) < 0) {
42957523e68SFan Zhang 			CS_LOG_ERR("Unable to configure queue pair");
43057523e68SFan Zhang 			return -1;
43157523e68SFan Zhang 		}
43257523e68SFan Zhang 	}
43357523e68SFan Zhang 
43457523e68SFan Zhang 	return 0;
43557523e68SFan Zhang }
43657523e68SFan Zhang 
43757523e68SFan Zhang /** Start queue pair */
43857523e68SFan Zhang static int
43957523e68SFan Zhang scheduler_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
44057523e68SFan Zhang 		__rte_unused uint16_t queue_pair_id)
44157523e68SFan Zhang {
44257523e68SFan Zhang 	return -ENOTSUP;
44357523e68SFan Zhang }
44457523e68SFan Zhang 
44557523e68SFan Zhang /** Stop queue pair */
44657523e68SFan Zhang static int
44757523e68SFan Zhang scheduler_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
44857523e68SFan Zhang 		__rte_unused uint16_t queue_pair_id)
44957523e68SFan Zhang {
45057523e68SFan Zhang 	return -ENOTSUP;
45157523e68SFan Zhang }
45257523e68SFan Zhang 
45357523e68SFan Zhang /** Return the number of allocated queue pairs */
45457523e68SFan Zhang static uint32_t
45557523e68SFan Zhang scheduler_pmd_qp_count(struct rte_cryptodev *dev)
45657523e68SFan Zhang {
45757523e68SFan Zhang 	return dev->data->nb_queue_pairs;
45857523e68SFan Zhang }
45957523e68SFan Zhang 
46057523e68SFan Zhang static uint32_t
46157523e68SFan Zhang scheduler_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
46257523e68SFan Zhang {
463b3bbd9e5SSlawomir Mrozowicz 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
464b3bbd9e5SSlawomir Mrozowicz 	uint8_t i = 0;
465b3bbd9e5SSlawomir Mrozowicz 	uint32_t max_priv_sess_size = 0;
466b3bbd9e5SSlawomir Mrozowicz 
467b3bbd9e5SSlawomir Mrozowicz 	/* Check what is the maximum private session size for all slaves */
468b3bbd9e5SSlawomir Mrozowicz 	for (i = 0; i < sched_ctx->nb_slaves; i++) {
469b3bbd9e5SSlawomir Mrozowicz 		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
470b3bbd9e5SSlawomir Mrozowicz 		struct rte_cryptodev *dev = &rte_cryptodevs[slave_dev_id];
471b3bbd9e5SSlawomir Mrozowicz 		uint32_t priv_sess_size = (*dev->dev_ops->session_get_size)(dev);
472b3bbd9e5SSlawomir Mrozowicz 
473b3bbd9e5SSlawomir Mrozowicz 		if (max_priv_sess_size < priv_sess_size)
474b3bbd9e5SSlawomir Mrozowicz 			max_priv_sess_size = priv_sess_size;
475b3bbd9e5SSlawomir Mrozowicz 	}
476b3bbd9e5SSlawomir Mrozowicz 
477b3bbd9e5SSlawomir Mrozowicz 	return max_priv_sess_size;
47857523e68SFan Zhang }
47957523e68SFan Zhang 
48057523e68SFan Zhang static int
481b3bbd9e5SSlawomir Mrozowicz scheduler_pmd_session_configure(struct rte_cryptodev *dev,
48257523e68SFan Zhang 	struct rte_crypto_sym_xform *xform,
483b3bbd9e5SSlawomir Mrozowicz 	struct rte_cryptodev_sym_session *sess,
484b3bbd9e5SSlawomir Mrozowicz 	struct rte_mempool *mempool)
48557523e68SFan Zhang {
486b3bbd9e5SSlawomir Mrozowicz 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
48757523e68SFan Zhang 	uint32_t i;
48827391b53SPablo de Lara 	int ret;
48957523e68SFan Zhang 
49057523e68SFan Zhang 	for (i = 0; i < sched_ctx->nb_slaves; i++) {
49157523e68SFan Zhang 		struct scheduler_slave *slave = &sched_ctx->slaves[i];
49257523e68SFan Zhang 
49327391b53SPablo de Lara 		ret = rte_cryptodev_sym_session_init(slave->dev_id, sess,
49427391b53SPablo de Lara 					xform, mempool);
49527391b53SPablo de Lara 		if (ret < 0) {
496b3bbd9e5SSlawomir Mrozowicz 			CS_LOG_ERR("unabled to config sym session");
49727391b53SPablo de Lara 			return ret;
49857523e68SFan Zhang 		}
49957523e68SFan Zhang 	}
50057523e68SFan Zhang 
50157523e68SFan Zhang 	return 0;
50257523e68SFan Zhang }
50357523e68SFan Zhang 
50457523e68SFan Zhang /** Clear the memory of session so it doesn't leave key material behind */
50557523e68SFan Zhang static void
50657523e68SFan Zhang scheduler_pmd_session_clear(struct rte_cryptodev *dev,
507b3bbd9e5SSlawomir Mrozowicz 		struct rte_cryptodev_sym_session *sess)
50857523e68SFan Zhang {
50957523e68SFan Zhang 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
510b3bbd9e5SSlawomir Mrozowicz 	uint32_t i;
51157523e68SFan Zhang 
512b3bbd9e5SSlawomir Mrozowicz 	/* Clear private data of slaves */
513b3bbd9e5SSlawomir Mrozowicz 	for (i = 0; i < sched_ctx->nb_slaves; i++) {
514b3bbd9e5SSlawomir Mrozowicz 		struct scheduler_slave *slave = &sched_ctx->slaves[i];
51557523e68SFan Zhang 
516b3bbd9e5SSlawomir Mrozowicz 		rte_cryptodev_sym_session_clear(slave->dev_id, sess);
51757523e68SFan Zhang 	}
51857523e68SFan Zhang }
51957523e68SFan Zhang 
/** Operation table the cryptodev framework dispatches through for the
 * scheduler PMD. Every entry maps a framework callback to the matching
 * static implementation above.
 */
struct rte_cryptodev_ops scheduler_pmd_ops = {
		.dev_configure		= scheduler_pmd_config,
		.dev_start		= scheduler_pmd_start,
		.dev_stop		= scheduler_pmd_stop,
		.dev_close		= scheduler_pmd_close,

		.stats_get		= scheduler_pmd_stats_get,
		.stats_reset		= scheduler_pmd_stats_reset,

		.dev_infos_get		= scheduler_pmd_info_get,

		.queue_pair_setup	= scheduler_pmd_qp_setup,
		.queue_pair_release	= scheduler_pmd_qp_release,
		.queue_pair_start	= scheduler_pmd_qp_start,
		.queue_pair_stop	= scheduler_pmd_qp_stop,
		.queue_pair_count	= scheduler_pmd_qp_count,

		.session_get_size	= scheduler_pmd_session_get_size,
		.session_configure	= scheduler_pmd_session_configure,
		.session_clear		= scheduler_pmd_session_clear,
};

/* Exported pointer used at device creation to publish the ops table. */
struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops;
543