/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <string.h>

#include <rte_config.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_dev.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_reorder.h>

#include "scheduler_pmd_private.h"

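/*
 * Device operation callbacks (struct rte_cryptodev_ops) for the crypto
 * scheduler PMD. Most callbacks fan out to every attached slave device,
 * so the scheduler presents its slaves as one virtual crypto device.
 */
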
/** Configure device */
static int
scheduler_pmd_config(struct rte_cryptodev *dev,
		struct rte_cryptodev_config *config)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret = 0;

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;

		ret = rte_cryptodev_configure(slave_dev_id, config);
		if (ret < 0)
			break;
	}

	return ret;
}
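
/*
 * Illustrative usage sketch (sched_dev_id and the field values are
 * hypothetical): configuring the scheduler propagates the same
 * configuration to every attached slave.
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_socket_id(),
 *		.nb_queue_pairs = 2,
 *	};
 *
 *	rte_cryptodev_configure(sched_dev_id, &conf);
 */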

/**
 * Create or free the reorder ring of a queue pair, depending on whether
 * reordering is enabled. The ring buffers crypto ops so they can be
 * dequeued in their original enqueue order; its size is the aggregate
 * slave buffer size rounded up to the next power of two, as required by
 * rte_ring_create().
 */
static int
update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (sched_ctx->reordering_enabled) {
		char order_ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
		uint32_t buff_size = rte_align32pow2(
			sched_ctx->nb_slaves * PER_SLAVE_BUFF_SIZE);

		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}

		if (!buff_size)
			return 0;

		if (snprintf(order_ring_name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
			dev->data->dev_id, qp_id) < 0) {
			CS_LOG_ERR("failed to create unique reorder buffer "
					"name");
			return -ENOMEM;
		}

		qp_ctx->order_ring = rte_ring_create(order_ring_name,
				buff_size, rte_socket_id(),
				RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (!qp_ctx->order_ring) {
			CS_LOG_ERR("failed to create order ring");
			return -ENOMEM;
		}
	} else {
		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}
	}

	return 0;
}

/**
 * Start device: refresh the per-queue-pair order rings, attach every
 * slave through the scheduling mode's ops, start the scheduler mode,
 * then start each slave device.
 */
static int
scheduler_pmd_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	if (dev->data->dev_started)
		return 0;

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = update_order_ring(dev, i);
		if (ret < 0) {
			CS_LOG_ERR("Failed to update reorder buffer");
			return ret;
		}
	}

	if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) {
		CS_LOG_ERR("Scheduler mode is not set");
		return -1;
	}

	if (!sched_ctx->nb_slaves) {
		CS_LOG_ERR("No slave in the scheduler");
		return -1;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.slave_attach, -ENOTSUP);

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;

		if ((*sched_ctx->ops.slave_attach)(dev, slave_dev_id) < 0) {
			CS_LOG_ERR("Failed to attach slave");
			return -ENOTSUP;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.scheduler_start, -ENOTSUP);

	if ((*sched_ctx->ops.scheduler_start)(dev) < 0) {
		CS_LOG_ERR("Scheduler start failed");
		return -1;
	}

	/* start all slaves */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		ret = (*slave_dev->dev_ops->dev_start)(slave_dev);
		if (ret < 0) {
			CS_LOG_ERR("Failed to start slave dev %u",
					slave_dev_id);
			return ret;
		}
	}

	return 0;
}

/** Stop device */
static void
scheduler_pmd_stop(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	if (!dev->data->dev_started)
		return;

	/* stop all slaves first */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		(*slave_dev->dev_ops->dev_stop)(slave_dev);
	}

	if (*sched_ctx->ops.scheduler_stop)
		(*sched_ctx->ops.scheduler_stop)(dev);

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;

		if (*sched_ctx->ops.slave_detach)
			(*sched_ctx->ops.slave_detach)(dev, slave_dev_id);
	}
}

/** Close device */
static int
scheduler_pmd_close(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	/* the dev should be stopped before being closed */
	if (dev->data->dev_started)
		return -EBUSY;

	/* close all slaves first */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		ret = (*slave_dev->dev_ops->dev_close)(slave_dev);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];

		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}

		if (qp_ctx->private_qp_ctx) {
			rte_free(qp_ctx->private_qp_ctx);
			qp_ctx->private_qp_ctx = NULL;
		}
	}

	/* NULL the pointers after freeing so a repeated close is harmless */
	if (sched_ctx->private_ctx) {
		rte_free(sched_ctx->private_ctx);
		sched_ctx->private_ctx = NULL;
	}

	if (sched_ctx->capabilities) {
		rte_free(sched_ctx->capabilities);
		sched_ctx->capabilities = NULL;
	}

	return 0;
}

/** Get device statistics */
static void
scheduler_pmd_stats_get(struct rte_cryptodev *dev,
	struct rte_cryptodev_stats *stats)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);
		struct rte_cryptodev_stats slave_stats = {0};

		(*slave_dev->dev_ops->stats_get)(slave_dev, &slave_stats);

		stats->enqueued_count += slave_stats.enqueued_count;
		stats->dequeued_count += slave_stats.dequeued_count;

		stats->enqueue_err_count += slave_stats.enqueue_err_count;
		stats->dequeue_err_count += slave_stats.dequeue_err_count;
	}
}
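
/*
 * Illustrative usage sketch (sched_dev_id is hypothetical): statistics
 * read from the scheduler are the sums of the slave devices' counters.
 *
 *	struct rte_cryptodev_stats stats;
 *
 *	rte_cryptodev_stats_get(sched_dev_id, &stats);
 */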

/** Reset device statistics */
static void
scheduler_pmd_stats_reset(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		(*slave_dev->dev_ops->stats_reset)(slave_dev);
	}
}

/** Get device info */
static void
scheduler_pmd_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t max_nb_sessions = sched_ctx->nb_slaves ?
			UINT32_MAX : RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS;
	uint32_t i;

	if (!dev_info)
		return;

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev_info slave_info;

		rte_cryptodev_info_get(slave_dev_id, &slave_info);
		max_nb_sessions = slave_info.sym.max_nb_sessions <
				max_nb_sessions ?
				slave_info.sym.max_nb_sessions :
				max_nb_sessions;
	}

	dev_info->dev_type = dev->dev_type;
	dev_info->feature_flags = dev->feature_flags;
	dev_info->capabilities = sched_ctx->capabilities;
	dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
	dev_info->sym.max_nb_sessions = max_nb_sessions;
}
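
/*
 * Illustrative usage sketch (sched_dev_id is hypothetical): the session
 * limit reported for the scheduler is the smallest limit among slaves.
 *
 *	struct rte_cryptodev_info info;
 *
 *	rte_cryptodev_info_get(sched_dev_id, &info);
 *	printf("max sessions: %u\n", info.sym.max_nb_sessions);
 */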

/** Release queue pair */
static int
scheduler_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (!qp_ctx)
		return 0;

	if (qp_ctx->order_ring)
		rte_ring_free(qp_ctx->order_ring);
	if (qp_ctx->private_qp_ctx)
		rte_free(qp_ctx->private_qp_ctx);

	rte_free(qp_ctx);
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
	const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx;
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
	uint32_t i;
	int ret;

	if (snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"CRYPTO_SCHE PMD %u QP %u",
			dev->data->dev_id, qp_id) < 0) {
		CS_LOG_ERR("Failed to create unique queue pair name");
		return -EFAULT;
	}

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		scheduler_pmd_qp_release(dev, qp_id);

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_id = sched_ctx->slaves[i].dev_id;

		ret = rte_cryptodev_queue_pair_setup(slave_id, qp_id,
				qp_conf, socket_id);
		if (ret < 0)
			return ret;
	}

	/* Allocate the queue pair data structure. */
	qp_ctx = rte_zmalloc_socket(name, sizeof(*qp_ctx), RTE_CACHE_LINE_SIZE,
			socket_id);
	if (qp_ctx == NULL)
		return -ENOMEM;

	/* The actual available object number = nb_descriptors - 1 */
	qp_ctx->max_nb_objs = qp_conf->nb_descriptors - 1;

	dev->data->queue_pairs[qp_id] = qp_ctx;

	if (*sched_ctx->ops.config_queue_pair) {
		if ((*sched_ctx->ops.config_queue_pair)(dev, qp_id) < 0) {
			CS_LOG_ERR("Unable to configure queue pair");
			return -1;
		}
	}

	return 0;
}
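
/*
 * Illustrative usage sketch (sched_dev_id and the descriptor count are
 * hypothetical): setting up a queue pair on the scheduler first creates
 * the matching queue pair on every slave.
 *
 *	struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 2048 };
 *
 *	rte_cryptodev_queue_pair_setup(sched_dev_id, 0, &qp_conf,
 *			rte_socket_id());
 */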

/** Start queue pair */
static int
scheduler_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t queue_pair_id)
{
	return -ENOTSUP;
}

/** Stop queue pair */
static int
scheduler_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t queue_pair_id)
{
	return -ENOTSUP;
}

/** Return the number of allocated queue pairs */
static uint32_t
scheduler_pmd_qp_count(struct rte_cryptodev *dev)
{
	return dev->data->nb_queue_pairs;
}

/** Return the size of the scheduler session structure */
static uint32_t
scheduler_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct scheduler_session);
}

/**
 * Create (create != 0) or free (create == 0) the slave sessions behind a
 * scheduler session. If creation fails on any slave, the slave sessions
 * created so far are freed again.
 */
static int
config_slave_sess(struct scheduler_ctx *sched_ctx,
		struct rte_crypto_sym_xform *xform,
		struct scheduler_session *sess,
		uint32_t create)
{
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		struct scheduler_slave *slave = &sched_ctx->slaves[i];

		if (sess->sessions[i]) {
			if (create)
				continue;
			/* !create */
			sess->sessions[i] = rte_cryptodev_sym_session_free(
					slave->dev_id, sess->sessions[i]);
		} else {
			if (!create)
				continue;
			/* create */
			sess->sessions[i] =
					rte_cryptodev_sym_session_create(
							slave->dev_id, xform);
			if (!sess->sessions[i]) {
				config_slave_sess(sched_ctx, NULL, sess, 0);
				return -1;
			}
		}
	}

	return 0;
}

/** Clear the session memory so it doesn't leave key material behind */
static void
scheduler_pmd_session_clear(struct rte_cryptodev *dev,
	void *sess)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;

	config_slave_sess(sched_ctx, NULL, sess, 0);

	memset(sess, 0, sizeof(struct scheduler_session));
}

/** Configure a scheduler session by creating one session on every slave */
static void *
scheduler_pmd_session_configure(struct rte_cryptodev *dev,
	struct rte_crypto_sym_xform *xform, void *sess)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;

	if (config_slave_sess(sched_ctx, xform, sess, 1) < 0) {
		CS_LOG_ERR("unable to configure sym session");
		return NULL;
	}

	return sess;
}
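
/*
 * Illustrative usage sketch (sched_dev_id and the xform contents are
 * hypothetical): a session created on the scheduler transparently gets
 * one backing session per slave, so any slave can process its ops.
 *
 *	struct rte_crypto_sym_xform cipher_xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = { ... },
 *	};
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(sched_dev_id, &cipher_xform);
 */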

struct rte_cryptodev_ops scheduler_pmd_ops = {
		.dev_configure		= scheduler_pmd_config,
		.dev_start		= scheduler_pmd_start,
		.dev_stop		= scheduler_pmd_stop,
		.dev_close		= scheduler_pmd_close,

		.stats_get		= scheduler_pmd_stats_get,
		.stats_reset		= scheduler_pmd_stats_reset,

		.dev_infos_get		= scheduler_pmd_info_get,

		.queue_pair_setup	= scheduler_pmd_qp_setup,
		.queue_pair_release	= scheduler_pmd_qp_release,
		.queue_pair_start	= scheduler_pmd_qp_start,
		.queue_pair_stop	= scheduler_pmd_qp_stop,
		.queue_pair_count	= scheduler_pmd_qp_count,

		.session_get_size	= scheduler_pmd_session_get_size,
		.session_configure	= scheduler_pmd_session_configure,
		.session_clear		= scheduler_pmd_session_clear,
};

struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops;