xref: /dpdk/drivers/crypto/scheduler/scheduler_pmd_ops.c (revision 27391b53b3fceb84e21173fa663650c3912dffca)
157523e68SFan Zhang /*-
257523e68SFan Zhang  *   BSD LICENSE
357523e68SFan Zhang  *
457523e68SFan Zhang  *   Copyright(c) 2017 Intel Corporation. All rights reserved.
557523e68SFan Zhang  *
657523e68SFan Zhang  *   Redistribution and use in source and binary forms, with or without
757523e68SFan Zhang  *   modification, are permitted provided that the following conditions
857523e68SFan Zhang  *   are met:
957523e68SFan Zhang  *
1057523e68SFan Zhang  *     * Redistributions of source code must retain the above copyright
1157523e68SFan Zhang  *       notice, this list of conditions and the following disclaimer.
1257523e68SFan Zhang  *     * Redistributions in binary form must reproduce the above copyright
1357523e68SFan Zhang  *       notice, this list of conditions and the following disclaimer in
1457523e68SFan Zhang  *       the documentation and/or other materials provided with the
1557523e68SFan Zhang  *       distribution.
1657523e68SFan Zhang  *     * Neither the name of Intel Corporation nor the names of its
1757523e68SFan Zhang  *       contributors may be used to endorse or promote products derived
1857523e68SFan Zhang  *       from this software without specific prior written permission.
1957523e68SFan Zhang  *
2057523e68SFan Zhang  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
2157523e68SFan Zhang  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
2257523e68SFan Zhang  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
2357523e68SFan Zhang  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
2457523e68SFan Zhang  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
2557523e68SFan Zhang  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
2657523e68SFan Zhang  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
2757523e68SFan Zhang  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
2857523e68SFan Zhang  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
2957523e68SFan Zhang  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
3057523e68SFan Zhang  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3157523e68SFan Zhang  */
3257523e68SFan Zhang #include <string.h>
3357523e68SFan Zhang 
3457523e68SFan Zhang #include <rte_config.h>
3557523e68SFan Zhang #include <rte_common.h>
3657523e68SFan Zhang #include <rte_malloc.h>
3757523e68SFan Zhang #include <rte_dev.h>
3857523e68SFan Zhang #include <rte_cryptodev.h>
3957523e68SFan Zhang #include <rte_cryptodev_pmd.h>
4063348b9dSPablo de Lara #include <rte_cryptodev_vdev.h>
4157523e68SFan Zhang #include <rte_reorder.h>
4257523e68SFan Zhang 
4357523e68SFan Zhang #include "scheduler_pmd_private.h"
4457523e68SFan Zhang 
4550e14527SFan Zhang /** attaching the slaves predefined by scheduler's EAL options */
4650e14527SFan Zhang static int
4750e14527SFan Zhang scheduler_attach_init_slave(struct rte_cryptodev *dev)
4850e14527SFan Zhang {
4950e14527SFan Zhang 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
5050e14527SFan Zhang 	uint8_t scheduler_id = dev->data->dev_id;
5150e14527SFan Zhang 	int i;
5250e14527SFan Zhang 
5350e14527SFan Zhang 	for (i = sched_ctx->nb_init_slaves - 1; i >= 0; i--) {
5450e14527SFan Zhang 		const char *dev_name = sched_ctx->init_slave_names[i];
5550e14527SFan Zhang 		struct rte_cryptodev *slave_dev =
5650e14527SFan Zhang 				rte_cryptodev_pmd_get_named_dev(dev_name);
5750e14527SFan Zhang 		int status;
5850e14527SFan Zhang 
5950e14527SFan Zhang 		if (!slave_dev) {
6050e14527SFan Zhang 			CS_LOG_ERR("Failed to locate slave dev %s",
6150e14527SFan Zhang 					dev_name);
6250e14527SFan Zhang 			return -EINVAL;
6350e14527SFan Zhang 		}
6450e14527SFan Zhang 
6550e14527SFan Zhang 		status = rte_cryptodev_scheduler_slave_attach(
6650e14527SFan Zhang 				scheduler_id, slave_dev->data->dev_id);
6750e14527SFan Zhang 
6850e14527SFan Zhang 		if (status < 0) {
6950e14527SFan Zhang 			CS_LOG_ERR("Failed to attach slave cryptodev %u",
7050e14527SFan Zhang 					slave_dev->data->dev_id);
7150e14527SFan Zhang 			return status;
7250e14527SFan Zhang 		}
7350e14527SFan Zhang 
7450e14527SFan Zhang 		CS_LOG_INFO("Scheduler %s attached slave %s\n",
7550e14527SFan Zhang 				dev->data->name,
7650e14527SFan Zhang 				sched_ctx->init_slave_names[i]);
7750e14527SFan Zhang 
7850e14527SFan Zhang 		rte_free(sched_ctx->init_slave_names[i]);
7950e14527SFan Zhang 
8050e14527SFan Zhang 		sched_ctx->nb_init_slaves -= 1;
8150e14527SFan Zhang 	}
8250e14527SFan Zhang 
8350e14527SFan Zhang 	return 0;
8450e14527SFan Zhang }
8557523e68SFan Zhang /** Configure device */
8657523e68SFan Zhang static int
8760e686c2SFan Zhang scheduler_pmd_config(struct rte_cryptodev *dev,
88b3bbd9e5SSlawomir Mrozowicz 		struct rte_cryptodev_config *config)
8957523e68SFan Zhang {
90b3bbd9e5SSlawomir Mrozowicz 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
91b3bbd9e5SSlawomir Mrozowicz 	uint32_t i;
9250e14527SFan Zhang 	int ret;
9350e14527SFan Zhang 
9450e14527SFan Zhang 	/* although scheduler_attach_init_slave presents multiple times,
9550e14527SFan Zhang 	 * there will be only 1 meaningful execution.
9650e14527SFan Zhang 	 */
9750e14527SFan Zhang 	ret = scheduler_attach_init_slave(dev);
9850e14527SFan Zhang 	if (ret < 0)
9950e14527SFan Zhang 		return ret;
10057523e68SFan Zhang 
101b3bbd9e5SSlawomir Mrozowicz 	for (i = 0; i < sched_ctx->nb_slaves; i++) {
102b3bbd9e5SSlawomir Mrozowicz 		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
103b3bbd9e5SSlawomir Mrozowicz 
104f7db6f82SPablo de Lara 		ret = rte_cryptodev_configure(slave_dev_id, config);
105b3bbd9e5SSlawomir Mrozowicz 		if (ret < 0)
106b3bbd9e5SSlawomir Mrozowicz 			break;
107b3bbd9e5SSlawomir Mrozowicz 	}
108b3bbd9e5SSlawomir Mrozowicz 
10957523e68SFan Zhang 	return ret;
11057523e68SFan Zhang }
11157523e68SFan Zhang 
11257523e68SFan Zhang static int
1138a48e039SFan Zhang update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id)
11457523e68SFan Zhang {
11557523e68SFan Zhang 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
11657523e68SFan Zhang 	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
11757523e68SFan Zhang 
11857523e68SFan Zhang 	if (sched_ctx->reordering_enabled) {
1198a48e039SFan Zhang 		char order_ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
1208a48e039SFan Zhang 		uint32_t buff_size = rte_align32pow2(
1218a48e039SFan Zhang 			sched_ctx->nb_slaves * PER_SLAVE_BUFF_SIZE);
12257523e68SFan Zhang 
1238a48e039SFan Zhang 		if (qp_ctx->order_ring) {
1248a48e039SFan Zhang 			rte_ring_free(qp_ctx->order_ring);
1258a48e039SFan Zhang 			qp_ctx->order_ring = NULL;
12657523e68SFan Zhang 		}
12757523e68SFan Zhang 
12857523e68SFan Zhang 		if (!buff_size)
12957523e68SFan Zhang 			return 0;
13057523e68SFan Zhang 
1318a48e039SFan Zhang 		if (snprintf(order_ring_name, RTE_CRYPTODEV_NAME_MAX_LEN,
13257523e68SFan Zhang 			"%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
13357523e68SFan Zhang 			dev->data->dev_id, qp_id) < 0) {
13457523e68SFan Zhang 			CS_LOG_ERR("failed to create unique reorder buffer "
13557523e68SFan Zhang 					"name");
13657523e68SFan Zhang 			return -ENOMEM;
13757523e68SFan Zhang 		}
13857523e68SFan Zhang 
1398a48e039SFan Zhang 		qp_ctx->order_ring = rte_ring_create(order_ring_name,
1408a48e039SFan Zhang 				buff_size, rte_socket_id(),
1418a48e039SFan Zhang 				RING_F_SP_ENQ | RING_F_SC_DEQ);
1428a48e039SFan Zhang 		if (!qp_ctx->order_ring) {
1438a48e039SFan Zhang 			CS_LOG_ERR("failed to create order ring");
14457523e68SFan Zhang 			return -ENOMEM;
14557523e68SFan Zhang 		}
14657523e68SFan Zhang 	} else {
1478a48e039SFan Zhang 		if (qp_ctx->order_ring) {
1488a48e039SFan Zhang 			rte_ring_free(qp_ctx->order_ring);
1498a48e039SFan Zhang 			qp_ctx->order_ring = NULL;
15057523e68SFan Zhang 		}
15157523e68SFan Zhang 	}
15257523e68SFan Zhang 
15357523e68SFan Zhang 	return 0;
15457523e68SFan Zhang }
15557523e68SFan Zhang 
/** Start device.
 *
 * Order matters here: EAL-predefined slaves are attached first, the
 * per-queue-pair reorder rings are (re)built, the scheduler mode is
 * validated and started, and only then are the slave devices started.
 */
static int
scheduler_pmd_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	/* already running: nothing to do */
	if (dev->data->dev_started)
		return 0;

	/* although scheduler_attach_init_slave presents multiple times,
	 * there will be only 1 meaningful execution.
	 */
	ret = scheduler_attach_init_slave(dev);
	if (ret < 0)
		return ret;

	/* (re)create the reorder ring of every queue pair */
	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = update_order_ring(dev, i);
		if (ret < 0) {
			CS_LOG_ERR("Failed to update reorder buffer");
			return ret;
		}
	}

	if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) {
		CS_LOG_ERR("Scheduler mode is not set");
		return -1;
	}

	if (!sched_ctx->nb_slaves) {
		CS_LOG_ERR("No slave in the scheduler");
		return -1;
	}

	/* the active scheduling mode must supply a slave_attach op */
	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.slave_attach, -ENOTSUP);

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;

		if ((*sched_ctx->ops.slave_attach)(dev, slave_dev_id) < 0) {
			CS_LOG_ERR("Failed to attach slave");
			return -ENOTSUP;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.scheduler_start, -ENOTSUP);

	if ((*sched_ctx->ops.scheduler_start)(dev) < 0) {
		CS_LOG_ERR("Scheduler start failed");
		return -1;
	}

	/* start all slaves */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		ret = (*slave_dev->dev_ops->dev_start)(slave_dev);
		if (ret < 0) {
			CS_LOG_ERR("Failed to start slave dev %u",
					slave_dev_id);
			return ret;
		}
	}

	return 0;
}
22657523e68SFan Zhang 
22757523e68SFan Zhang /** Stop device */
22857523e68SFan Zhang static void
22957523e68SFan Zhang scheduler_pmd_stop(struct rte_cryptodev *dev)
23057523e68SFan Zhang {
23157523e68SFan Zhang 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
23257523e68SFan Zhang 	uint32_t i;
23357523e68SFan Zhang 
23457523e68SFan Zhang 	if (!dev->data->dev_started)
23557523e68SFan Zhang 		return;
23657523e68SFan Zhang 
23757523e68SFan Zhang 	/* stop all slaves first */
23857523e68SFan Zhang 	for (i = 0; i < sched_ctx->nb_slaves; i++) {
23957523e68SFan Zhang 		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
24057523e68SFan Zhang 		struct rte_cryptodev *slave_dev =
24157523e68SFan Zhang 				rte_cryptodev_pmd_get_dev(slave_dev_id);
24257523e68SFan Zhang 
24357523e68SFan Zhang 		(*slave_dev->dev_ops->dev_stop)(slave_dev);
24457523e68SFan Zhang 	}
24557523e68SFan Zhang 
24657523e68SFan Zhang 	if (*sched_ctx->ops.scheduler_stop)
24757523e68SFan Zhang 		(*sched_ctx->ops.scheduler_stop)(dev);
24857523e68SFan Zhang 
24957523e68SFan Zhang 	for (i = 0; i < sched_ctx->nb_slaves; i++) {
25057523e68SFan Zhang 		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
25157523e68SFan Zhang 
25257523e68SFan Zhang 		if (*sched_ctx->ops.slave_detach)
25357523e68SFan Zhang 			(*sched_ctx->ops.slave_detach)(dev, slave_dev_id);
25457523e68SFan Zhang 	}
25557523e68SFan Zhang }
25657523e68SFan Zhang 
25757523e68SFan Zhang /** Close device */
25857523e68SFan Zhang static int
25957523e68SFan Zhang scheduler_pmd_close(struct rte_cryptodev *dev)
26057523e68SFan Zhang {
26157523e68SFan Zhang 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
26257523e68SFan Zhang 	uint32_t i;
26357523e68SFan Zhang 	int ret;
26457523e68SFan Zhang 
26557523e68SFan Zhang 	/* the dev should be stopped before being closed */
26657523e68SFan Zhang 	if (dev->data->dev_started)
26757523e68SFan Zhang 		return -EBUSY;
26857523e68SFan Zhang 
26957523e68SFan Zhang 	/* close all slaves first */
27057523e68SFan Zhang 	for (i = 0; i < sched_ctx->nb_slaves; i++) {
27157523e68SFan Zhang 		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
27257523e68SFan Zhang 		struct rte_cryptodev *slave_dev =
27357523e68SFan Zhang 				rte_cryptodev_pmd_get_dev(slave_dev_id);
27457523e68SFan Zhang 
27557523e68SFan Zhang 		ret = (*slave_dev->dev_ops->dev_close)(slave_dev);
27657523e68SFan Zhang 		if (ret < 0)
27757523e68SFan Zhang 			return ret;
27857523e68SFan Zhang 	}
27957523e68SFan Zhang 
28057523e68SFan Zhang 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
28157523e68SFan Zhang 		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
28257523e68SFan Zhang 
2838a48e039SFan Zhang 		if (qp_ctx->order_ring) {
2848a48e039SFan Zhang 			rte_ring_free(qp_ctx->order_ring);
2858a48e039SFan Zhang 			qp_ctx->order_ring = NULL;
28657523e68SFan Zhang 		}
28757523e68SFan Zhang 
28857523e68SFan Zhang 		if (qp_ctx->private_qp_ctx) {
28957523e68SFan Zhang 			rte_free(qp_ctx->private_qp_ctx);
29057523e68SFan Zhang 			qp_ctx->private_qp_ctx = NULL;
29157523e68SFan Zhang 		}
29257523e68SFan Zhang 	}
29357523e68SFan Zhang 
29457523e68SFan Zhang 	if (sched_ctx->private_ctx)
29557523e68SFan Zhang 		rte_free(sched_ctx->private_ctx);
29657523e68SFan Zhang 
29757523e68SFan Zhang 	if (sched_ctx->capabilities)
29857523e68SFan Zhang 		rte_free(sched_ctx->capabilities);
29957523e68SFan Zhang 
30057523e68SFan Zhang 	return 0;
30157523e68SFan Zhang }
30257523e68SFan Zhang 
30357523e68SFan Zhang /** Get device statistics */
30457523e68SFan Zhang static void
30557523e68SFan Zhang scheduler_pmd_stats_get(struct rte_cryptodev *dev,
30657523e68SFan Zhang 	struct rte_cryptodev_stats *stats)
30757523e68SFan Zhang {
30857523e68SFan Zhang 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
30957523e68SFan Zhang 	uint32_t i;
31057523e68SFan Zhang 
31157523e68SFan Zhang 	for (i = 0; i < sched_ctx->nb_slaves; i++) {
31257523e68SFan Zhang 		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
31357523e68SFan Zhang 		struct rte_cryptodev *slave_dev =
31457523e68SFan Zhang 				rte_cryptodev_pmd_get_dev(slave_dev_id);
31557523e68SFan Zhang 		struct rte_cryptodev_stats slave_stats = {0};
31657523e68SFan Zhang 
31757523e68SFan Zhang 		(*slave_dev->dev_ops->stats_get)(slave_dev, &slave_stats);
31857523e68SFan Zhang 
31957523e68SFan Zhang 		stats->enqueued_count += slave_stats.enqueued_count;
32057523e68SFan Zhang 		stats->dequeued_count += slave_stats.dequeued_count;
32157523e68SFan Zhang 
32257523e68SFan Zhang 		stats->enqueue_err_count += slave_stats.enqueue_err_count;
32357523e68SFan Zhang 		stats->dequeue_err_count += slave_stats.dequeue_err_count;
32457523e68SFan Zhang 	}
32557523e68SFan Zhang }
32657523e68SFan Zhang 
32757523e68SFan Zhang /** Reset device statistics */
32857523e68SFan Zhang static void
32957523e68SFan Zhang scheduler_pmd_stats_reset(struct rte_cryptodev *dev)
33057523e68SFan Zhang {
33157523e68SFan Zhang 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
33257523e68SFan Zhang 	uint32_t i;
33357523e68SFan Zhang 
33457523e68SFan Zhang 	for (i = 0; i < sched_ctx->nb_slaves; i++) {
33557523e68SFan Zhang 		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
33657523e68SFan Zhang 		struct rte_cryptodev *slave_dev =
33757523e68SFan Zhang 				rte_cryptodev_pmd_get_dev(slave_dev_id);
33857523e68SFan Zhang 
33957523e68SFan Zhang 		(*slave_dev->dev_ops->stats_reset)(slave_dev);
34057523e68SFan Zhang 	}
34157523e68SFan Zhang }
34257523e68SFan Zhang 
34357523e68SFan Zhang /** Get device info */
34457523e68SFan Zhang static void
34557523e68SFan Zhang scheduler_pmd_info_get(struct rte_cryptodev *dev,
34657523e68SFan Zhang 		struct rte_cryptodev_info *dev_info)
34757523e68SFan Zhang {
34857523e68SFan Zhang 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
34957523e68SFan Zhang 	uint32_t max_nb_sessions = sched_ctx->nb_slaves ?
35057523e68SFan Zhang 			UINT32_MAX : RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS;
35157523e68SFan Zhang 	uint32_t i;
35257523e68SFan Zhang 
35357523e68SFan Zhang 	if (!dev_info)
35457523e68SFan Zhang 		return;
35557523e68SFan Zhang 
35650e14527SFan Zhang 	/* although scheduler_attach_init_slave presents multiple times,
35750e14527SFan Zhang 	 * there will be only 1 meaningful execution.
35850e14527SFan Zhang 	 */
35950e14527SFan Zhang 	scheduler_attach_init_slave(dev);
36050e14527SFan Zhang 
36157523e68SFan Zhang 	for (i = 0; i < sched_ctx->nb_slaves; i++) {
36257523e68SFan Zhang 		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
36357523e68SFan Zhang 		struct rte_cryptodev_info slave_info;
36457523e68SFan Zhang 
36557523e68SFan Zhang 		rte_cryptodev_info_get(slave_dev_id, &slave_info);
36657523e68SFan Zhang 		max_nb_sessions = slave_info.sym.max_nb_sessions <
36757523e68SFan Zhang 				max_nb_sessions ?
36857523e68SFan Zhang 				slave_info.sym.max_nb_sessions :
36957523e68SFan Zhang 				max_nb_sessions;
37057523e68SFan Zhang 	}
37157523e68SFan Zhang 
3727a364faeSSlawomir Mrozowicz 	dev_info->driver_id = dev->driver_id;
37357523e68SFan Zhang 	dev_info->feature_flags = dev->feature_flags;
37457523e68SFan Zhang 	dev_info->capabilities = sched_ctx->capabilities;
37557523e68SFan Zhang 	dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
37657523e68SFan Zhang 	dev_info->sym.max_nb_sessions = max_nb_sessions;
37757523e68SFan Zhang }
37857523e68SFan Zhang 
37957523e68SFan Zhang /** Release queue pair */
38057523e68SFan Zhang static int
38157523e68SFan Zhang scheduler_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
38257523e68SFan Zhang {
38357523e68SFan Zhang 	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
38457523e68SFan Zhang 
38557523e68SFan Zhang 	if (!qp_ctx)
38657523e68SFan Zhang 		return 0;
38757523e68SFan Zhang 
3888a48e039SFan Zhang 	if (qp_ctx->order_ring)
3898a48e039SFan Zhang 		rte_ring_free(qp_ctx->order_ring);
39057523e68SFan Zhang 	if (qp_ctx->private_qp_ctx)
39157523e68SFan Zhang 		rte_free(qp_ctx->private_qp_ctx);
39257523e68SFan Zhang 
39357523e68SFan Zhang 	rte_free(qp_ctx);
39457523e68SFan Zhang 	dev->data->queue_pairs[qp_id] = NULL;
39557523e68SFan Zhang 
39657523e68SFan Zhang 	return 0;
39757523e68SFan Zhang }
39857523e68SFan Zhang 
39957523e68SFan Zhang /** Setup a queue pair */
40057523e68SFan Zhang static int
40157523e68SFan Zhang scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
402f7db6f82SPablo de Lara 	const struct rte_cryptodev_qp_conf *qp_conf, int socket_id,
403f7db6f82SPablo de Lara 	struct rte_mempool *session_pool)
40457523e68SFan Zhang {
40557523e68SFan Zhang 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
40657523e68SFan Zhang 	struct scheduler_qp_ctx *qp_ctx;
40757523e68SFan Zhang 	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
408c281019bSFan Zhang 	uint32_t i;
409c281019bSFan Zhang 	int ret;
41057523e68SFan Zhang 
41157523e68SFan Zhang 	if (snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
41257523e68SFan Zhang 			"CRYTO_SCHE PMD %u QP %u",
41357523e68SFan Zhang 			dev->data->dev_id, qp_id) < 0) {
41457523e68SFan Zhang 		CS_LOG_ERR("Failed to create unique queue pair name");
41557523e68SFan Zhang 		return -EFAULT;
41657523e68SFan Zhang 	}
41757523e68SFan Zhang 
41857523e68SFan Zhang 	/* Free memory prior to re-allocation if needed. */
41957523e68SFan Zhang 	if (dev->data->queue_pairs[qp_id] != NULL)
42057523e68SFan Zhang 		scheduler_pmd_qp_release(dev, qp_id);
42157523e68SFan Zhang 
422c281019bSFan Zhang 	for (i = 0; i < sched_ctx->nb_slaves; i++) {
423c281019bSFan Zhang 		uint8_t slave_id = sched_ctx->slaves[i].dev_id;
424c281019bSFan Zhang 
425f7db6f82SPablo de Lara 		/*
426f7db6f82SPablo de Lara 		 * All slaves will share the same session mempool
427f7db6f82SPablo de Lara 		 * for session-less operations, so the objects
428f7db6f82SPablo de Lara 		 * must be big enough for all the drivers used.
429f7db6f82SPablo de Lara 		 */
430c281019bSFan Zhang 		ret = rte_cryptodev_queue_pair_setup(slave_id, qp_id,
431f7db6f82SPablo de Lara 				qp_conf, socket_id, session_pool);
432c281019bSFan Zhang 		if (ret < 0)
433c281019bSFan Zhang 			return ret;
434c281019bSFan Zhang 	}
435c281019bSFan Zhang 
43657523e68SFan Zhang 	/* Allocate the queue pair data structure. */
43757523e68SFan Zhang 	qp_ctx = rte_zmalloc_socket(name, sizeof(*qp_ctx), RTE_CACHE_LINE_SIZE,
43857523e68SFan Zhang 			socket_id);
43957523e68SFan Zhang 	if (qp_ctx == NULL)
44057523e68SFan Zhang 		return -ENOMEM;
44157523e68SFan Zhang 
44288405476SFan Zhang 	/* The actual available object number = nb_descriptors - 1 */
44388405476SFan Zhang 	qp_ctx->max_nb_objs = qp_conf->nb_descriptors - 1;
44488405476SFan Zhang 
44557523e68SFan Zhang 	dev->data->queue_pairs[qp_id] = qp_ctx;
44657523e68SFan Zhang 
44750e14527SFan Zhang 	/* although scheduler_attach_init_slave presents multiple times,
44850e14527SFan Zhang 	 * there will be only 1 meaningful execution.
44950e14527SFan Zhang 	 */
45050e14527SFan Zhang 	ret = scheduler_attach_init_slave(dev);
45150e14527SFan Zhang 	if (ret < 0) {
45250e14527SFan Zhang 		CS_LOG_ERR("Failed to attach slave");
45350e14527SFan Zhang 		scheduler_pmd_qp_release(dev, qp_id);
45450e14527SFan Zhang 		return ret;
45550e14527SFan Zhang 	}
45650e14527SFan Zhang 
45757523e68SFan Zhang 	if (*sched_ctx->ops.config_queue_pair) {
45857523e68SFan Zhang 		if ((*sched_ctx->ops.config_queue_pair)(dev, qp_id) < 0) {
45957523e68SFan Zhang 			CS_LOG_ERR("Unable to configure queue pair");
46057523e68SFan Zhang 			return -1;
46157523e68SFan Zhang 		}
46257523e68SFan Zhang 	}
46357523e68SFan Zhang 
46457523e68SFan Zhang 	return 0;
46557523e68SFan Zhang }
46657523e68SFan Zhang 
/** Start queue pair — not supported by the scheduler PMD. */
static int
scheduler_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t queue_pair_id)
{
	return -ENOTSUP;
}
47457523e68SFan Zhang 
/** Stop queue pair — not supported by the scheduler PMD. */
static int
scheduler_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t queue_pair_id)
{
	return -ENOTSUP;
}
48257523e68SFan Zhang 
/** Return the number of allocated queue pairs on the scheduler device. */
static uint32_t
scheduler_pmd_qp_count(struct rte_cryptodev *dev)
{
	return dev->data->nb_queue_pairs;
}
48957523e68SFan Zhang 
49057523e68SFan Zhang static uint32_t
49157523e68SFan Zhang scheduler_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
49257523e68SFan Zhang {
493b3bbd9e5SSlawomir Mrozowicz 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
494b3bbd9e5SSlawomir Mrozowicz 	uint8_t i = 0;
495b3bbd9e5SSlawomir Mrozowicz 	uint32_t max_priv_sess_size = 0;
496b3bbd9e5SSlawomir Mrozowicz 
497b3bbd9e5SSlawomir Mrozowicz 	/* Check what is the maximum private session size for all slaves */
498b3bbd9e5SSlawomir Mrozowicz 	for (i = 0; i < sched_ctx->nb_slaves; i++) {
499b3bbd9e5SSlawomir Mrozowicz 		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
500b3bbd9e5SSlawomir Mrozowicz 		struct rte_cryptodev *dev = &rte_cryptodevs[slave_dev_id];
501b3bbd9e5SSlawomir Mrozowicz 		uint32_t priv_sess_size = (*dev->dev_ops->session_get_size)(dev);
502b3bbd9e5SSlawomir Mrozowicz 
503b3bbd9e5SSlawomir Mrozowicz 		if (max_priv_sess_size < priv_sess_size)
504b3bbd9e5SSlawomir Mrozowicz 			max_priv_sess_size = priv_sess_size;
505b3bbd9e5SSlawomir Mrozowicz 	}
506b3bbd9e5SSlawomir Mrozowicz 
507b3bbd9e5SSlawomir Mrozowicz 	return max_priv_sess_size;
50857523e68SFan Zhang }
50957523e68SFan Zhang 
51057523e68SFan Zhang static int
511b3bbd9e5SSlawomir Mrozowicz scheduler_pmd_session_configure(struct rte_cryptodev *dev,
51257523e68SFan Zhang 	struct rte_crypto_sym_xform *xform,
513b3bbd9e5SSlawomir Mrozowicz 	struct rte_cryptodev_sym_session *sess,
514b3bbd9e5SSlawomir Mrozowicz 	struct rte_mempool *mempool)
51557523e68SFan Zhang {
516b3bbd9e5SSlawomir Mrozowicz 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
51757523e68SFan Zhang 	uint32_t i;
518*27391b53SPablo de Lara 	int ret;
51957523e68SFan Zhang 
52057523e68SFan Zhang 	for (i = 0; i < sched_ctx->nb_slaves; i++) {
52157523e68SFan Zhang 		struct scheduler_slave *slave = &sched_ctx->slaves[i];
52257523e68SFan Zhang 
523*27391b53SPablo de Lara 		ret = rte_cryptodev_sym_session_init(slave->dev_id, sess,
524*27391b53SPablo de Lara 					xform, mempool);
525*27391b53SPablo de Lara 		if (ret < 0) {
526b3bbd9e5SSlawomir Mrozowicz 			CS_LOG_ERR("unabled to config sym session");
527*27391b53SPablo de Lara 			return ret;
52857523e68SFan Zhang 		}
52957523e68SFan Zhang 	}
53057523e68SFan Zhang 
53157523e68SFan Zhang 	return 0;
53257523e68SFan Zhang }
53357523e68SFan Zhang 
53457523e68SFan Zhang /** Clear the memory of session so it doesn't leave key material behind */
53557523e68SFan Zhang static void
53657523e68SFan Zhang scheduler_pmd_session_clear(struct rte_cryptodev *dev,
537b3bbd9e5SSlawomir Mrozowicz 		struct rte_cryptodev_sym_session *sess)
53857523e68SFan Zhang {
53957523e68SFan Zhang 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
540b3bbd9e5SSlawomir Mrozowicz 	uint32_t i;
54157523e68SFan Zhang 
542b3bbd9e5SSlawomir Mrozowicz 	/* Clear private data of slaves */
543b3bbd9e5SSlawomir Mrozowicz 	for (i = 0; i < sched_ctx->nb_slaves; i++) {
544b3bbd9e5SSlawomir Mrozowicz 		struct scheduler_slave *slave = &sched_ctx->slaves[i];
54557523e68SFan Zhang 
546b3bbd9e5SSlawomir Mrozowicz 		rte_cryptodev_sym_session_clear(slave->dev_id, sess);
54757523e68SFan Zhang 	}
54857523e68SFan Zhang }
54957523e68SFan Zhang 
/** Cryptodev operation table of the scheduler PMD; each callback fans
 * the request out to the attached slave devices.
 */
struct rte_cryptodev_ops scheduler_pmd_ops = {
		.dev_configure		= scheduler_pmd_config,
		.dev_start		= scheduler_pmd_start,
		.dev_stop		= scheduler_pmd_stop,
		.dev_close		= scheduler_pmd_close,

		.stats_get		= scheduler_pmd_stats_get,
		.stats_reset		= scheduler_pmd_stats_reset,

		.dev_infos_get		= scheduler_pmd_info_get,

		.queue_pair_setup	= scheduler_pmd_qp_setup,
		.queue_pair_release	= scheduler_pmd_qp_release,
		.queue_pair_start	= scheduler_pmd_qp_start,
		.queue_pair_stop	= scheduler_pmd_qp_stop,
		.queue_pair_count	= scheduler_pmd_qp_count,

		.session_get_size	= scheduler_pmd_session_get_size,
		.session_configure	= scheduler_pmd_session_configure,
		.session_clear		= scheduler_pmd_session_clear,
};

/** Exported ops pointer referenced by the PMD registration code. */
struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops;
573