xref: /dpdk/drivers/crypto/scheduler/scheduler_pmd_ops.c (revision b3bbd9e5f265937164ed8c9c61d12f7543a745ea)
157523e68SFan Zhang /*-
257523e68SFan Zhang  *   BSD LICENSE
357523e68SFan Zhang  *
457523e68SFan Zhang  *   Copyright(c) 2017 Intel Corporation. All rights reserved.
557523e68SFan Zhang  *
657523e68SFan Zhang  *   Redistribution and use in source and binary forms, with or without
757523e68SFan Zhang  *   modification, are permitted provided that the following conditions
857523e68SFan Zhang  *   are met:
957523e68SFan Zhang  *
1057523e68SFan Zhang  *     * Redistributions of source code must retain the above copyright
1157523e68SFan Zhang  *       notice, this list of conditions and the following disclaimer.
1257523e68SFan Zhang  *     * Redistributions in binary form must reproduce the above copyright
1357523e68SFan Zhang  *       notice, this list of conditions and the following disclaimer in
1457523e68SFan Zhang  *       the documentation and/or other materials provided with the
1557523e68SFan Zhang  *       distribution.
1657523e68SFan Zhang  *     * Neither the name of Intel Corporation nor the names of its
1757523e68SFan Zhang  *       contributors may be used to endorse or promote products derived
1857523e68SFan Zhang  *       from this software without specific prior written permission.
1957523e68SFan Zhang  *
2057523e68SFan Zhang  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
2157523e68SFan Zhang  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
2257523e68SFan Zhang  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
2357523e68SFan Zhang  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
2457523e68SFan Zhang  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
2557523e68SFan Zhang  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
2657523e68SFan Zhang  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
2757523e68SFan Zhang  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
2857523e68SFan Zhang  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
2957523e68SFan Zhang  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
3057523e68SFan Zhang  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3157523e68SFan Zhang  */
3257523e68SFan Zhang #include <string.h>
3357523e68SFan Zhang 
3457523e68SFan Zhang #include <rte_config.h>
3557523e68SFan Zhang #include <rte_common.h>
3657523e68SFan Zhang #include <rte_malloc.h>
3757523e68SFan Zhang #include <rte_dev.h>
3857523e68SFan Zhang #include <rte_cryptodev.h>
3957523e68SFan Zhang #include <rte_cryptodev_pmd.h>
4063348b9dSPablo de Lara #include <rte_cryptodev_vdev.h>
4157523e68SFan Zhang #include <rte_reorder.h>
4257523e68SFan Zhang 
4357523e68SFan Zhang #include "scheduler_pmd_private.h"
4457523e68SFan Zhang 
4550e14527SFan Zhang /** attaching the slaves predefined by scheduler's EAL options */
4650e14527SFan Zhang static int
4750e14527SFan Zhang scheduler_attach_init_slave(struct rte_cryptodev *dev)
4850e14527SFan Zhang {
4950e14527SFan Zhang 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
5050e14527SFan Zhang 	uint8_t scheduler_id = dev->data->dev_id;
5150e14527SFan Zhang 	int i;
5250e14527SFan Zhang 
5350e14527SFan Zhang 	for (i = sched_ctx->nb_init_slaves - 1; i >= 0; i--) {
5450e14527SFan Zhang 		const char *dev_name = sched_ctx->init_slave_names[i];
5550e14527SFan Zhang 		struct rte_cryptodev *slave_dev =
5650e14527SFan Zhang 				rte_cryptodev_pmd_get_named_dev(dev_name);
5750e14527SFan Zhang 		int status;
5850e14527SFan Zhang 
5950e14527SFan Zhang 		if (!slave_dev) {
6050e14527SFan Zhang 			CS_LOG_ERR("Failed to locate slave dev %s",
6150e14527SFan Zhang 					dev_name);
6250e14527SFan Zhang 			return -EINVAL;
6350e14527SFan Zhang 		}
6450e14527SFan Zhang 
6550e14527SFan Zhang 		status = rte_cryptodev_scheduler_slave_attach(
6650e14527SFan Zhang 				scheduler_id, slave_dev->data->dev_id);
6750e14527SFan Zhang 
6850e14527SFan Zhang 		if (status < 0) {
6950e14527SFan Zhang 			CS_LOG_ERR("Failed to attach slave cryptodev %u",
7050e14527SFan Zhang 					slave_dev->data->dev_id);
7150e14527SFan Zhang 			return status;
7250e14527SFan Zhang 		}
7350e14527SFan Zhang 
7450e14527SFan Zhang 		CS_LOG_INFO("Scheduler %s attached slave %s\n",
7550e14527SFan Zhang 				dev->data->name,
7650e14527SFan Zhang 				sched_ctx->init_slave_names[i]);
7750e14527SFan Zhang 
7850e14527SFan Zhang 		rte_free(sched_ctx->init_slave_names[i]);
7950e14527SFan Zhang 
8050e14527SFan Zhang 		sched_ctx->nb_init_slaves -= 1;
8150e14527SFan Zhang 	}
8250e14527SFan Zhang 
8350e14527SFan Zhang 	return 0;
8450e14527SFan Zhang }
8557523e68SFan Zhang /** Configure device */
8657523e68SFan Zhang static int
8760e686c2SFan Zhang scheduler_pmd_config(struct rte_cryptodev *dev,
88*b3bbd9e5SSlawomir Mrozowicz 		struct rte_cryptodev_config *config)
8957523e68SFan Zhang {
90*b3bbd9e5SSlawomir Mrozowicz 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
91*b3bbd9e5SSlawomir Mrozowicz 	uint32_t i;
9250e14527SFan Zhang 	int ret;
9350e14527SFan Zhang 
9450e14527SFan Zhang 	/* although scheduler_attach_init_slave presents multiple times,
9550e14527SFan Zhang 	 * there will be only 1 meaningful execution.
9650e14527SFan Zhang 	 */
9750e14527SFan Zhang 	ret = scheduler_attach_init_slave(dev);
9850e14527SFan Zhang 	if (ret < 0)
9950e14527SFan Zhang 		return ret;
10057523e68SFan Zhang 
101*b3bbd9e5SSlawomir Mrozowicz 	for (i = 0; i < sched_ctx->nb_slaves; i++) {
102*b3bbd9e5SSlawomir Mrozowicz 		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
103*b3bbd9e5SSlawomir Mrozowicz 
104*b3bbd9e5SSlawomir Mrozowicz 		ret = rte_cryptodev_configure(slave_dev_id, config,
105*b3bbd9e5SSlawomir Mrozowicz 				dev->data->session_pool);
106*b3bbd9e5SSlawomir Mrozowicz 		if (ret < 0)
107*b3bbd9e5SSlawomir Mrozowicz 			break;
108*b3bbd9e5SSlawomir Mrozowicz 	}
109*b3bbd9e5SSlawomir Mrozowicz 
11057523e68SFan Zhang 	return ret;
11157523e68SFan Zhang }
11257523e68SFan Zhang 
11357523e68SFan Zhang static int
1148a48e039SFan Zhang update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id)
11557523e68SFan Zhang {
11657523e68SFan Zhang 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
11757523e68SFan Zhang 	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
11857523e68SFan Zhang 
11957523e68SFan Zhang 	if (sched_ctx->reordering_enabled) {
1208a48e039SFan Zhang 		char order_ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
1218a48e039SFan Zhang 		uint32_t buff_size = rte_align32pow2(
1228a48e039SFan Zhang 			sched_ctx->nb_slaves * PER_SLAVE_BUFF_SIZE);
12357523e68SFan Zhang 
1248a48e039SFan Zhang 		if (qp_ctx->order_ring) {
1258a48e039SFan Zhang 			rte_ring_free(qp_ctx->order_ring);
1268a48e039SFan Zhang 			qp_ctx->order_ring = NULL;
12757523e68SFan Zhang 		}
12857523e68SFan Zhang 
12957523e68SFan Zhang 		if (!buff_size)
13057523e68SFan Zhang 			return 0;
13157523e68SFan Zhang 
1328a48e039SFan Zhang 		if (snprintf(order_ring_name, RTE_CRYPTODEV_NAME_MAX_LEN,
13357523e68SFan Zhang 			"%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
13457523e68SFan Zhang 			dev->data->dev_id, qp_id) < 0) {
13557523e68SFan Zhang 			CS_LOG_ERR("failed to create unique reorder buffer "
13657523e68SFan Zhang 					"name");
13757523e68SFan Zhang 			return -ENOMEM;
13857523e68SFan Zhang 		}
13957523e68SFan Zhang 
1408a48e039SFan Zhang 		qp_ctx->order_ring = rte_ring_create(order_ring_name,
1418a48e039SFan Zhang 				buff_size, rte_socket_id(),
1428a48e039SFan Zhang 				RING_F_SP_ENQ | RING_F_SC_DEQ);
1438a48e039SFan Zhang 		if (!qp_ctx->order_ring) {
1448a48e039SFan Zhang 			CS_LOG_ERR("failed to create order ring");
14557523e68SFan Zhang 			return -ENOMEM;
14657523e68SFan Zhang 		}
14757523e68SFan Zhang 	} else {
1488a48e039SFan Zhang 		if (qp_ctx->order_ring) {
1498a48e039SFan Zhang 			rte_ring_free(qp_ctx->order_ring);
1508a48e039SFan Zhang 			qp_ctx->order_ring = NULL;
15157523e68SFan Zhang 		}
15257523e68SFan Zhang 	}
15357523e68SFan Zhang 
15457523e68SFan Zhang 	return 0;
15557523e68SFan Zhang }
15657523e68SFan Zhang 
/** Start device: attach pending slaves, rebuild reorder rings, start the
 * scheduling mode, then start every slave device. Returns 0 on success or
 * a negative error code; a generic failure is reported as -1.
 */
static int
scheduler_pmd_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	/* Already running: nothing to do. */
	if (dev->data->dev_started)
		return 0;

	/* although scheduler_attach_init_slave presents multiple times,
	 * there will be only 1 meaningful execution.
	 */
	ret = scheduler_attach_init_slave(dev);
	if (ret < 0)
		return ret;

	/* (Re)build the reorder ring of every queue pair; rings are only
	 * created when reordering is enabled.
	 */
	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = update_order_ring(dev, i);
		if (ret < 0) {
			CS_LOG_ERR("Failed to update reorder buffer");
			return ret;
		}
	}

	/* A scheduling mode must be selected before the device can start. */
	if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) {
		CS_LOG_ERR("Scheduler mode is not set");
		return -1;
	}

	if (!sched_ctx->nb_slaves) {
		CS_LOG_ERR("No slave in the scheduler");
		return -1;
	}

	/* First let the active scheduling mode attach each slave ... */
	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.slave_attach, -ENOTSUP);

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;

		if ((*sched_ctx->ops.slave_attach)(dev, slave_dev_id) < 0) {
			CS_LOG_ERR("Failed to attach slave");
			return -ENOTSUP;
		}
	}

	/* ... then start the scheduling mode itself ... */
	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.scheduler_start, -ENOTSUP);

	if ((*sched_ctx->ops.scheduler_start)(dev) < 0) {
		CS_LOG_ERR("Scheduler start failed");
		return -1;
	}

	/* start all slaves */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		ret = (*slave_dev->dev_ops->dev_start)(slave_dev);
		if (ret < 0) {
			/* NOTE(review): slaves started earlier in this loop
			 * are left running on a partial failure -- confirm
			 * whether a rollback (stop) pass is needed here.
			 */
			CS_LOG_ERR("Failed to start slave dev %u",
					slave_dev_id);
			return ret;
		}
	}

	return 0;
}
22757523e68SFan Zhang 
22857523e68SFan Zhang /** Stop device */
22957523e68SFan Zhang static void
23057523e68SFan Zhang scheduler_pmd_stop(struct rte_cryptodev *dev)
23157523e68SFan Zhang {
23257523e68SFan Zhang 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
23357523e68SFan Zhang 	uint32_t i;
23457523e68SFan Zhang 
23557523e68SFan Zhang 	if (!dev->data->dev_started)
23657523e68SFan Zhang 		return;
23757523e68SFan Zhang 
23857523e68SFan Zhang 	/* stop all slaves first */
23957523e68SFan Zhang 	for (i = 0; i < sched_ctx->nb_slaves; i++) {
24057523e68SFan Zhang 		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
24157523e68SFan Zhang 		struct rte_cryptodev *slave_dev =
24257523e68SFan Zhang 				rte_cryptodev_pmd_get_dev(slave_dev_id);
24357523e68SFan Zhang 
24457523e68SFan Zhang 		(*slave_dev->dev_ops->dev_stop)(slave_dev);
24557523e68SFan Zhang 	}
24657523e68SFan Zhang 
24757523e68SFan Zhang 	if (*sched_ctx->ops.scheduler_stop)
24857523e68SFan Zhang 		(*sched_ctx->ops.scheduler_stop)(dev);
24957523e68SFan Zhang 
25057523e68SFan Zhang 	for (i = 0; i < sched_ctx->nb_slaves; i++) {
25157523e68SFan Zhang 		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
25257523e68SFan Zhang 
25357523e68SFan Zhang 		if (*sched_ctx->ops.slave_detach)
25457523e68SFan Zhang 			(*sched_ctx->ops.slave_detach)(dev, slave_dev_id);
25557523e68SFan Zhang 	}
25657523e68SFan Zhang }
25757523e68SFan Zhang 
25857523e68SFan Zhang /** Close device */
25957523e68SFan Zhang static int
26057523e68SFan Zhang scheduler_pmd_close(struct rte_cryptodev *dev)
26157523e68SFan Zhang {
26257523e68SFan Zhang 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
26357523e68SFan Zhang 	uint32_t i;
26457523e68SFan Zhang 	int ret;
26557523e68SFan Zhang 
26657523e68SFan Zhang 	/* the dev should be stopped before being closed */
26757523e68SFan Zhang 	if (dev->data->dev_started)
26857523e68SFan Zhang 		return -EBUSY;
26957523e68SFan Zhang 
27057523e68SFan Zhang 	/* close all slaves first */
27157523e68SFan Zhang 	for (i = 0; i < sched_ctx->nb_slaves; i++) {
27257523e68SFan Zhang 		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
27357523e68SFan Zhang 		struct rte_cryptodev *slave_dev =
27457523e68SFan Zhang 				rte_cryptodev_pmd_get_dev(slave_dev_id);
27557523e68SFan Zhang 
27657523e68SFan Zhang 		ret = (*slave_dev->dev_ops->dev_close)(slave_dev);
27757523e68SFan Zhang 		if (ret < 0)
27857523e68SFan Zhang 			return ret;
27957523e68SFan Zhang 	}
28057523e68SFan Zhang 
28157523e68SFan Zhang 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
28257523e68SFan Zhang 		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
28357523e68SFan Zhang 
2848a48e039SFan Zhang 		if (qp_ctx->order_ring) {
2858a48e039SFan Zhang 			rte_ring_free(qp_ctx->order_ring);
2868a48e039SFan Zhang 			qp_ctx->order_ring = NULL;
28757523e68SFan Zhang 		}
28857523e68SFan Zhang 
28957523e68SFan Zhang 		if (qp_ctx->private_qp_ctx) {
29057523e68SFan Zhang 			rte_free(qp_ctx->private_qp_ctx);
29157523e68SFan Zhang 			qp_ctx->private_qp_ctx = NULL;
29257523e68SFan Zhang 		}
29357523e68SFan Zhang 	}
29457523e68SFan Zhang 
29557523e68SFan Zhang 	if (sched_ctx->private_ctx)
29657523e68SFan Zhang 		rte_free(sched_ctx->private_ctx);
29757523e68SFan Zhang 
29857523e68SFan Zhang 	if (sched_ctx->capabilities)
29957523e68SFan Zhang 		rte_free(sched_ctx->capabilities);
30057523e68SFan Zhang 
30157523e68SFan Zhang 	return 0;
30257523e68SFan Zhang }
30357523e68SFan Zhang 
30457523e68SFan Zhang /** Get device statistics */
30557523e68SFan Zhang static void
30657523e68SFan Zhang scheduler_pmd_stats_get(struct rte_cryptodev *dev,
30757523e68SFan Zhang 	struct rte_cryptodev_stats *stats)
30857523e68SFan Zhang {
30957523e68SFan Zhang 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
31057523e68SFan Zhang 	uint32_t i;
31157523e68SFan Zhang 
31257523e68SFan Zhang 	for (i = 0; i < sched_ctx->nb_slaves; i++) {
31357523e68SFan Zhang 		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
31457523e68SFan Zhang 		struct rte_cryptodev *slave_dev =
31557523e68SFan Zhang 				rte_cryptodev_pmd_get_dev(slave_dev_id);
31657523e68SFan Zhang 		struct rte_cryptodev_stats slave_stats = {0};
31757523e68SFan Zhang 
31857523e68SFan Zhang 		(*slave_dev->dev_ops->stats_get)(slave_dev, &slave_stats);
31957523e68SFan Zhang 
32057523e68SFan Zhang 		stats->enqueued_count += slave_stats.enqueued_count;
32157523e68SFan Zhang 		stats->dequeued_count += slave_stats.dequeued_count;
32257523e68SFan Zhang 
32357523e68SFan Zhang 		stats->enqueue_err_count += slave_stats.enqueue_err_count;
32457523e68SFan Zhang 		stats->dequeue_err_count += slave_stats.dequeue_err_count;
32557523e68SFan Zhang 	}
32657523e68SFan Zhang }
32757523e68SFan Zhang 
32857523e68SFan Zhang /** Reset device statistics */
32957523e68SFan Zhang static void
33057523e68SFan Zhang scheduler_pmd_stats_reset(struct rte_cryptodev *dev)
33157523e68SFan Zhang {
33257523e68SFan Zhang 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
33357523e68SFan Zhang 	uint32_t i;
33457523e68SFan Zhang 
33557523e68SFan Zhang 	for (i = 0; i < sched_ctx->nb_slaves; i++) {
33657523e68SFan Zhang 		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
33757523e68SFan Zhang 		struct rte_cryptodev *slave_dev =
33857523e68SFan Zhang 				rte_cryptodev_pmd_get_dev(slave_dev_id);
33957523e68SFan Zhang 
34057523e68SFan Zhang 		(*slave_dev->dev_ops->stats_reset)(slave_dev);
34157523e68SFan Zhang 	}
34257523e68SFan Zhang }
34357523e68SFan Zhang 
34457523e68SFan Zhang /** Get device info */
34557523e68SFan Zhang static void
34657523e68SFan Zhang scheduler_pmd_info_get(struct rte_cryptodev *dev,
34757523e68SFan Zhang 		struct rte_cryptodev_info *dev_info)
34857523e68SFan Zhang {
34957523e68SFan Zhang 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
35057523e68SFan Zhang 	uint32_t max_nb_sessions = sched_ctx->nb_slaves ?
35157523e68SFan Zhang 			UINT32_MAX : RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS;
35257523e68SFan Zhang 	uint32_t i;
35357523e68SFan Zhang 
35457523e68SFan Zhang 	if (!dev_info)
35557523e68SFan Zhang 		return;
35657523e68SFan Zhang 
35750e14527SFan Zhang 	/* although scheduler_attach_init_slave presents multiple times,
35850e14527SFan Zhang 	 * there will be only 1 meaningful execution.
35950e14527SFan Zhang 	 */
36050e14527SFan Zhang 	scheduler_attach_init_slave(dev);
36150e14527SFan Zhang 
36257523e68SFan Zhang 	for (i = 0; i < sched_ctx->nb_slaves; i++) {
36357523e68SFan Zhang 		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
36457523e68SFan Zhang 		struct rte_cryptodev_info slave_info;
36557523e68SFan Zhang 
36657523e68SFan Zhang 		rte_cryptodev_info_get(slave_dev_id, &slave_info);
36757523e68SFan Zhang 		max_nb_sessions = slave_info.sym.max_nb_sessions <
36857523e68SFan Zhang 				max_nb_sessions ?
36957523e68SFan Zhang 				slave_info.sym.max_nb_sessions :
37057523e68SFan Zhang 				max_nb_sessions;
37157523e68SFan Zhang 	}
37257523e68SFan Zhang 
3737a364faeSSlawomir Mrozowicz 	dev_info->driver_id = dev->driver_id;
37457523e68SFan Zhang 	dev_info->feature_flags = dev->feature_flags;
37557523e68SFan Zhang 	dev_info->capabilities = sched_ctx->capabilities;
37657523e68SFan Zhang 	dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
37757523e68SFan Zhang 	dev_info->sym.max_nb_sessions = max_nb_sessions;
37857523e68SFan Zhang }
37957523e68SFan Zhang 
38057523e68SFan Zhang /** Release queue pair */
38157523e68SFan Zhang static int
38257523e68SFan Zhang scheduler_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
38357523e68SFan Zhang {
38457523e68SFan Zhang 	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
38557523e68SFan Zhang 
38657523e68SFan Zhang 	if (!qp_ctx)
38757523e68SFan Zhang 		return 0;
38857523e68SFan Zhang 
3898a48e039SFan Zhang 	if (qp_ctx->order_ring)
3908a48e039SFan Zhang 		rte_ring_free(qp_ctx->order_ring);
39157523e68SFan Zhang 	if (qp_ctx->private_qp_ctx)
39257523e68SFan Zhang 		rte_free(qp_ctx->private_qp_ctx);
39357523e68SFan Zhang 
39457523e68SFan Zhang 	rte_free(qp_ctx);
39557523e68SFan Zhang 	dev->data->queue_pairs[qp_id] = NULL;
39657523e68SFan Zhang 
39757523e68SFan Zhang 	return 0;
39857523e68SFan Zhang }
39957523e68SFan Zhang 
40057523e68SFan Zhang /** Setup a queue pair */
40157523e68SFan Zhang static int
40257523e68SFan Zhang scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
403c281019bSFan Zhang 	const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
40457523e68SFan Zhang {
40557523e68SFan Zhang 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
40657523e68SFan Zhang 	struct scheduler_qp_ctx *qp_ctx;
40757523e68SFan Zhang 	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
408c281019bSFan Zhang 	uint32_t i;
409c281019bSFan Zhang 	int ret;
41057523e68SFan Zhang 
41157523e68SFan Zhang 	if (snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
41257523e68SFan Zhang 			"CRYTO_SCHE PMD %u QP %u",
41357523e68SFan Zhang 			dev->data->dev_id, qp_id) < 0) {
41457523e68SFan Zhang 		CS_LOG_ERR("Failed to create unique queue pair name");
41557523e68SFan Zhang 		return -EFAULT;
41657523e68SFan Zhang 	}
41757523e68SFan Zhang 
41857523e68SFan Zhang 	/* Free memory prior to re-allocation if needed. */
41957523e68SFan Zhang 	if (dev->data->queue_pairs[qp_id] != NULL)
42057523e68SFan Zhang 		scheduler_pmd_qp_release(dev, qp_id);
42157523e68SFan Zhang 
422c281019bSFan Zhang 	for (i = 0; i < sched_ctx->nb_slaves; i++) {
423c281019bSFan Zhang 		uint8_t slave_id = sched_ctx->slaves[i].dev_id;
424c281019bSFan Zhang 
425c281019bSFan Zhang 		ret = rte_cryptodev_queue_pair_setup(slave_id, qp_id,
426c281019bSFan Zhang 				qp_conf, socket_id);
427c281019bSFan Zhang 		if (ret < 0)
428c281019bSFan Zhang 			return ret;
429c281019bSFan Zhang 	}
430c281019bSFan Zhang 
43157523e68SFan Zhang 	/* Allocate the queue pair data structure. */
43257523e68SFan Zhang 	qp_ctx = rte_zmalloc_socket(name, sizeof(*qp_ctx), RTE_CACHE_LINE_SIZE,
43357523e68SFan Zhang 			socket_id);
43457523e68SFan Zhang 	if (qp_ctx == NULL)
43557523e68SFan Zhang 		return -ENOMEM;
43657523e68SFan Zhang 
43788405476SFan Zhang 	/* The actual available object number = nb_descriptors - 1 */
43888405476SFan Zhang 	qp_ctx->max_nb_objs = qp_conf->nb_descriptors - 1;
43988405476SFan Zhang 
44057523e68SFan Zhang 	dev->data->queue_pairs[qp_id] = qp_ctx;
44157523e68SFan Zhang 
44250e14527SFan Zhang 	/* although scheduler_attach_init_slave presents multiple times,
44350e14527SFan Zhang 	 * there will be only 1 meaningful execution.
44450e14527SFan Zhang 	 */
44550e14527SFan Zhang 	ret = scheduler_attach_init_slave(dev);
44650e14527SFan Zhang 	if (ret < 0) {
44750e14527SFan Zhang 		CS_LOG_ERR("Failed to attach slave");
44850e14527SFan Zhang 		scheduler_pmd_qp_release(dev, qp_id);
44950e14527SFan Zhang 		return ret;
45050e14527SFan Zhang 	}
45150e14527SFan Zhang 
45257523e68SFan Zhang 	if (*sched_ctx->ops.config_queue_pair) {
45357523e68SFan Zhang 		if ((*sched_ctx->ops.config_queue_pair)(dev, qp_id) < 0) {
45457523e68SFan Zhang 			CS_LOG_ERR("Unable to configure queue pair");
45557523e68SFan Zhang 			return -1;
45657523e68SFan Zhang 		}
45757523e68SFan Zhang 	}
45857523e68SFan Zhang 
45957523e68SFan Zhang 	return 0;
46057523e68SFan Zhang }
46157523e68SFan Zhang 
/** Start queue pair: per-queue-pair start is not supported by this PMD. */
static int
scheduler_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t queue_pair_id)
{
	return -ENOTSUP;
}
46957523e68SFan Zhang 
/** Stop queue pair: per-queue-pair stop is not supported by this PMD. */
static int
scheduler_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t queue_pair_id)
{
	return -ENOTSUP;
}
47757523e68SFan Zhang 
/** Return the number of allocated queue pairs of the scheduler device. */
static uint32_t
scheduler_pmd_qp_count(struct rte_cryptodev *dev)
{
	return dev->data->nb_queue_pairs;
}
48457523e68SFan Zhang 
48557523e68SFan Zhang static uint32_t
48657523e68SFan Zhang scheduler_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
48757523e68SFan Zhang {
488*b3bbd9e5SSlawomir Mrozowicz 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
489*b3bbd9e5SSlawomir Mrozowicz 	uint8_t i = 0;
490*b3bbd9e5SSlawomir Mrozowicz 	uint32_t max_priv_sess_size = 0;
491*b3bbd9e5SSlawomir Mrozowicz 
492*b3bbd9e5SSlawomir Mrozowicz 	/* Check what is the maximum private session size for all slaves */
493*b3bbd9e5SSlawomir Mrozowicz 	for (i = 0; i < sched_ctx->nb_slaves; i++) {
494*b3bbd9e5SSlawomir Mrozowicz 		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
495*b3bbd9e5SSlawomir Mrozowicz 		struct rte_cryptodev *dev = &rte_cryptodevs[slave_dev_id];
496*b3bbd9e5SSlawomir Mrozowicz 		uint32_t priv_sess_size = (*dev->dev_ops->session_get_size)(dev);
497*b3bbd9e5SSlawomir Mrozowicz 
498*b3bbd9e5SSlawomir Mrozowicz 		if (max_priv_sess_size < priv_sess_size)
499*b3bbd9e5SSlawomir Mrozowicz 			max_priv_sess_size = priv_sess_size;
500*b3bbd9e5SSlawomir Mrozowicz 	}
501*b3bbd9e5SSlawomir Mrozowicz 
502*b3bbd9e5SSlawomir Mrozowicz 	return max_priv_sess_size;
50357523e68SFan Zhang }
50457523e68SFan Zhang 
50557523e68SFan Zhang static int
506*b3bbd9e5SSlawomir Mrozowicz scheduler_pmd_session_configure(struct rte_cryptodev *dev,
50757523e68SFan Zhang 	struct rte_crypto_sym_xform *xform,
508*b3bbd9e5SSlawomir Mrozowicz 	struct rte_cryptodev_sym_session *sess,
509*b3bbd9e5SSlawomir Mrozowicz 	struct rte_mempool *mempool)
51057523e68SFan Zhang {
511*b3bbd9e5SSlawomir Mrozowicz 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
51257523e68SFan Zhang 	uint32_t i;
51357523e68SFan Zhang 
51457523e68SFan Zhang 	for (i = 0; i < sched_ctx->nb_slaves; i++) {
51557523e68SFan Zhang 		struct scheduler_slave *slave = &sched_ctx->slaves[i];
51657523e68SFan Zhang 
517*b3bbd9e5SSlawomir Mrozowicz 		if (rte_cryptodev_sym_session_init(slave->dev_id, sess,
518*b3bbd9e5SSlawomir Mrozowicz 					xform, mempool) < 0) {
519*b3bbd9e5SSlawomir Mrozowicz 			CS_LOG_ERR("unabled to config sym session");
52057523e68SFan Zhang 			return -1;
52157523e68SFan Zhang 		}
52257523e68SFan Zhang 	}
52357523e68SFan Zhang 
52457523e68SFan Zhang 	return 0;
52557523e68SFan Zhang }
52657523e68SFan Zhang 
52757523e68SFan Zhang /** Clear the memory of session so it doesn't leave key material behind */
52857523e68SFan Zhang static void
52957523e68SFan Zhang scheduler_pmd_session_clear(struct rte_cryptodev *dev,
530*b3bbd9e5SSlawomir Mrozowicz 		struct rte_cryptodev_sym_session *sess)
53157523e68SFan Zhang {
53257523e68SFan Zhang 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
533*b3bbd9e5SSlawomir Mrozowicz 	uint32_t i;
53457523e68SFan Zhang 
535*b3bbd9e5SSlawomir Mrozowicz 	/* Clear private data of slaves */
536*b3bbd9e5SSlawomir Mrozowicz 	for (i = 0; i < sched_ctx->nb_slaves; i++) {
537*b3bbd9e5SSlawomir Mrozowicz 		struct scheduler_slave *slave = &sched_ctx->slaves[i];
53857523e68SFan Zhang 
539*b3bbd9e5SSlawomir Mrozowicz 		rte_cryptodev_sym_session_clear(slave->dev_id, sess);
54057523e68SFan Zhang 	}
54157523e68SFan Zhang }
54257523e68SFan Zhang 
/** Operation table hooked into the cryptodev framework for the
 * scheduler PMD; each entry dispatches to a handler defined above.
 */
struct rte_cryptodev_ops scheduler_pmd_ops = {
		.dev_configure		= scheduler_pmd_config,
		.dev_start		= scheduler_pmd_start,
		.dev_stop		= scheduler_pmd_stop,
		.dev_close		= scheduler_pmd_close,

		.stats_get		= scheduler_pmd_stats_get,
		.stats_reset		= scheduler_pmd_stats_reset,

		.dev_infos_get		= scheduler_pmd_info_get,

		.queue_pair_setup	= scheduler_pmd_qp_setup,
		.queue_pair_release	= scheduler_pmd_qp_release,
		.queue_pair_start	= scheduler_pmd_qp_start,
		.queue_pair_stop	= scheduler_pmd_qp_stop,
		.queue_pair_count	= scheduler_pmd_qp_count,

		.session_get_size	= scheduler_pmd_session_get_size,
		.session_configure	= scheduler_pmd_session_configure,
		.session_clear		= scheduler_pmd_session_clear,
};
56457523e68SFan Zhang 
/* Exported pointer to the op table -- presumably consumed by the PMD
 * registration/probe code elsewhere in the driver; confirm against the
 * scheduler PMD creation path.
 */
struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops;
566