xref: /dpdk/drivers/crypto/scheduler/scheduler_pmd_ops.c (revision e2af4e403c15b9de0d692288bbea866e981dba4d)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */
#include <string.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <dev_driver.h>
#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_security_driver.h>
#include <rte_reorder.h>
#include <rte_errno.h>

#include "scheduler_pmd_private.h"

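/*
 * Bookkeeping for a worker session that has already been created.
 * Workers sharing a driver_id also share one worker session, so each
 * entry records the driver and device it was created on and, depending
 * on the session type, either the symmetric session or the security
 * session plus the security context needed to destroy it.
 */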
struct scheduler_configured_sess_info {
	uint8_t dev_id;
	uint8_t driver_id;
	union {
		struct rte_cryptodev_sym_session *sess;
		struct {
			struct rte_security_session *sec_sess;
			struct rte_security_ctx *sec_ctx;
		};
	};
};

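/*
 * Create a session on each worker, reusing one worker session across
 * workers that share the same driver_id. On any failure, all worker
 * sessions created so far are rolled back. For
 * RTE_CRYPTO_OP_WITH_SESSION, sess/sess_params are a symmetric session
 * and xform chain; otherwise they are a security session and its conf.
 */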
static int
scheduler_session_create(void *sess, void *sess_params,
		struct scheduler_ctx *sched_ctx,
		enum rte_crypto_op_sess_type session_type)
{
	struct rte_mempool *mp = rte_mempool_from_obj(sess);
	struct scheduler_session_ctx *sess_ctx;
	struct scheduler_configured_sess_info configured_sess[
			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS] = {{0}};
	uint32_t i, j, n_configured_sess = 0;
	int ret = 0;

	if (session_type == RTE_CRYPTO_OP_WITH_SESSION)
		sess_ctx = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
	else
		sess_ctx = SECURITY_GET_SESS_PRIV(sess);

	if (mp == NULL)
		return -EINVAL;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		struct scheduler_worker *worker = &sched_ctx->workers[i];
		struct rte_cryptodev *dev = &rte_cryptodevs[worker->dev_id];
		uint8_t next_worker = 0;

		for (j = 0; j < n_configured_sess; j++) {
			if (configured_sess[j].driver_id == worker->driver_id) {
				if (session_type == RTE_CRYPTO_OP_WITH_SESSION)
					sess_ctx->worker_sess[i] =
						configured_sess[j].sess;
				else
					sess_ctx->worker_sec_sess[i] =
						configured_sess[j].sec_sess;

				next_worker = 1;
				break;
			}
		}
		if (next_worker)
			continue;

		if (rte_mempool_avail_count(mp) == 0) {
			ret = -ENOMEM;
			goto error_exit;
		}

		if (session_type == RTE_CRYPTO_OP_WITH_SESSION) {
			struct rte_cryptodev_sym_session *worker_sess =
				rte_cryptodev_sym_session_create(worker->dev_id,
						sess_params, mp);

			if (worker_sess == NULL) {
				ret = -rte_errno;
				goto error_exit;
			}

			worker_sess->opaque_data = (uint64_t)sess;
			sess_ctx->worker_sess[i] = worker_sess;
			configured_sess[n_configured_sess].sess = worker_sess;
		} else {
			struct rte_security_session *worker_sess =
				rte_security_session_create(dev->security_ctx,
						sess_params, mp);

			if (worker_sess == NULL) {
				ret = -rte_errno;
				goto error_exit;
			}

			worker_sess->opaque_data = (uint64_t)sess;
			sess_ctx->worker_sec_sess[i] = worker_sess;
			configured_sess[n_configured_sess].sec_sess =
							worker_sess;
			configured_sess[n_configured_sess].sec_ctx =
							dev->security_ctx;
		}

		configured_sess[n_configured_sess].driver_id =
							worker->driver_id;
		configured_sess[n_configured_sess].dev_id = worker->dev_id;
		n_configured_sess++;
	}

	return 0;

error_exit:
	sess_ctx->ref_cnt = sched_ctx->ref_cnt;
	for (i = 0; i < n_configured_sess; i++) {
		if (session_type == RTE_CRYPTO_OP_WITH_SESSION)
			rte_cryptodev_sym_session_free(
						configured_sess[i].dev_id,
						configured_sess[i].sess);
		else
			rte_security_session_destroy(
						configured_sess[i].sec_ctx,
						configured_sess[i].sec_sess);
	}

	return ret;
}

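/*
 * Destroy the worker sessions attached to a scheduler session. Workers
 * sharing a driver_id also share one worker session, so it is freed
 * only once and the remaining references are simply cleared.
 */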
static void
scheduler_session_destroy(void *sess, struct scheduler_ctx *sched_ctx,
		uint8_t session_type)
{
	struct scheduler_session_ctx *sess_ctx;
	struct scheduler_configured_sess_info deleted_sess[
			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS] = {{0}};
	uint32_t i, j, n_deleted_sess = 0;

	if (session_type == RTE_CRYPTO_OP_WITH_SESSION)
		sess_ctx = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
	else
		sess_ctx = SECURITY_GET_SESS_PRIV(sess);

	if (sched_ctx->ref_cnt != sess_ctx->ref_cnt) {
		CR_SCHED_LOG(WARNING,
			"Workers were updated between session creation and "
			"deletion. The session may not be fully freed.");
	}

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		struct scheduler_worker *worker = &sched_ctx->workers[i];
		struct rte_cryptodev *dev = &rte_cryptodevs[worker->dev_id];
		uint8_t next_worker = 0;

		for (j = 0; j < n_deleted_sess; j++) {
			if (deleted_sess[j].driver_id == worker->driver_id) {
				if (session_type == RTE_CRYPTO_OP_WITH_SESSION)
					sess_ctx->worker_sess[i] = NULL;
				else
					sess_ctx->worker_sec_sess[i] = NULL;

				next_worker = 1;
				break;
			}
		}
		if (next_worker)
			continue;

		if (session_type == RTE_CRYPTO_OP_WITH_SESSION) {
			rte_cryptodev_sym_session_free(worker->dev_id,
						sess_ctx->worker_sess[i]);
			sess_ctx->worker_sess[i] = NULL;
		} else {
			rte_security_session_destroy(dev->security_ctx,
						sess_ctx->worker_sec_sess[i]);
			sess_ctx->worker_sec_sess[i] = NULL;
		}

		deleted_sess[n_deleted_sess++].driver_id = worker->driver_id;
	}
}

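/*
 * Return the largest private session size reported by any worker, so
 * a single session mempool can serve every worker in the scheduler.
 */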
static unsigned int
scheduler_session_size_get(struct scheduler_ctx *sched_ctx,
		uint8_t session_type)
{
	uint8_t i = 0;
	uint32_t max_priv_sess_size = 0;

	/* Find the maximum private session size across all workers */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev *dev = &rte_cryptodevs[worker_dev_id];
		struct rte_security_ctx *sec_ctx = dev->security_ctx;
		uint32_t priv_sess_size = 0;

		if (session_type == RTE_CRYPTO_OP_WITH_SESSION) {
			priv_sess_size =
				(*dev->dev_ops->sym_session_get_size)(dev);
		} else {
			priv_sess_size = (*sec_ctx->ops->session_get_size)(dev);
		}

		max_priv_sess_size = RTE_MAX(max_priv_sess_size, priv_sess_size);
	}

	return max_priv_sess_size;
}

/** Attach the workers predefined by the scheduler's EAL options */
static int
scheduler_attach_init_worker(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint8_t scheduler_id = dev->data->dev_id;
	int i;

	for (i = sched_ctx->nb_init_workers - 1; i >= 0; i--) {
		const char *dev_name = sched_ctx->init_worker_names[i];
		struct rte_cryptodev *worker_dev =
				rte_cryptodev_pmd_get_named_dev(dev_name);
		int status;

		if (!worker_dev) {
			CR_SCHED_LOG(ERR, "Failed to locate worker dev %s",
					dev_name);
			return -EINVAL;
		}

		status = rte_cryptodev_scheduler_worker_attach(
				scheduler_id, worker_dev->data->dev_id);

		if (status < 0) {
			CR_SCHED_LOG(ERR, "Failed to attach worker cryptodev %u",
					worker_dev->data->dev_id);
			return status;
		}

		CR_SCHED_LOG(INFO, "Scheduler %s attached worker %s",
				dev->data->name,
				sched_ctx->init_worker_names[i]);

		rte_free(sched_ctx->init_worker_names[i]);
		sched_ctx->init_worker_names[i] = NULL;

		sched_ctx->nb_init_workers -= 1;
	}

	return 0;
}

/** Configure device */
static int
scheduler_pmd_config(struct rte_cryptodev *dev,
		struct rte_cryptodev_config *config)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	/* scheduler_attach_init_worker() is called from several entry
	 * points, but only one call performs a meaningful attach.
	 */
	ret = scheduler_attach_init_worker(dev);
	if (ret < 0)
		return ret;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;

		ret = rte_cryptodev_configure(worker_dev_id, config);
		if (ret < 0)
			break;
	}

	return ret;
}

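/*
 * Create or resize the reorder ring of a queue pair when reordering is
 * enabled, or free it when reordering is disabled. The ring size scales
 * with the number of workers, rounded up to a power of two.
 */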
static int
update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (sched_ctx->reordering_enabled) {
		char order_ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
		uint32_t buff_size = rte_align32pow2(
			sched_ctx->nb_workers * PER_WORKER_BUFF_SIZE);

		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}

		if (!buff_size)
			return 0;

		if (snprintf(order_ring_name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
			dev->data->dev_id, qp_id) < 0) {
			CR_SCHED_LOG(ERR, "failed to create unique reorder "
					"buffer name");
			return -ENOMEM;
		}

		qp_ctx->order_ring = rte_ring_create(order_ring_name,
				buff_size, rte_socket_id(),
				RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (!qp_ctx->order_ring) {
			CR_SCHED_LOG(ERR, "failed to create order ring");
			return -ENOMEM;
		}
	} else {
		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}
	}

	return 0;
}

/** Start device */
static int
scheduler_pmd_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	if (dev->data->dev_started)
		return 0;

	/* scheduler_attach_init_worker() is called from several entry
	 * points, but only one call performs a meaningful attach.
	 */
	ret = scheduler_attach_init_worker(dev);
	if (ret < 0)
		return ret;

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = update_order_ring(dev, i);
		if (ret < 0) {
			CR_SCHED_LOG(ERR, "Failed to update reorder buffer");
			return ret;
		}
	}

	if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) {
		CR_SCHED_LOG(ERR, "Scheduler mode is not set");
		return -1;
	}

	if (!sched_ctx->nb_workers) {
		CR_SCHED_LOG(ERR, "No worker in the scheduler");
		return -1;
	}

	if (*sched_ctx->ops.worker_attach == NULL)
		return -ENOTSUP;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;

		if ((*sched_ctx->ops.worker_attach)(dev, worker_dev_id) < 0) {
			CR_SCHED_LOG(ERR, "Failed to attach worker");
			return -ENOTSUP;
		}
	}

	if (*sched_ctx->ops.scheduler_start == NULL)
		return -ENOTSUP;

	if ((*sched_ctx->ops.scheduler_start)(dev) < 0) {
		CR_SCHED_LOG(ERR, "Scheduler start failed");
		return -1;
	}

	/* start all workers */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		ret = rte_cryptodev_start(worker_dev_id);
		if (ret < 0) {
			CR_SCHED_LOG(ERR, "Failed to start worker dev %u",
					worker_dev_id);
			return ret;
		}
	}

	return 0;
}

/** Stop device */
static void
scheduler_pmd_stop(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	if (!dev->data->dev_started)
		return;

	/* stop all workers first */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;

		rte_cryptodev_stop(worker_dev_id);
	}

	if (*sched_ctx->ops.scheduler_stop)
		(*sched_ctx->ops.scheduler_stop)(dev);

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;

		if (*sched_ctx->ops.worker_detach)
			(*sched_ctx->ops.worker_detach)(dev, worker_dev_id);
	}
}

/** Close device */
static int
scheduler_pmd_close(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	/* the dev should be stopped before being closed */
	if (dev->data->dev_started)
		return -EBUSY;

	/* close all workers first */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev *worker_dev =
				rte_cryptodev_pmd_get_dev(worker_dev_id);

		ret = (*worker_dev->dev_ops->dev_close)(worker_dev);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];

		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}

		if (qp_ctx->private_qp_ctx) {
			rte_free(qp_ctx->private_qp_ctx);
			qp_ctx->private_qp_ctx = NULL;
		}
	}

	if (sched_ctx->private_ctx) {
		rte_free(sched_ctx->private_ctx);
		sched_ctx->private_ctx = NULL;
	}

	scheduler_free_capabilities(sched_ctx);

	return 0;
}

/** Get device statistics */
static void
scheduler_pmd_stats_get(struct rte_cryptodev *dev,
	struct rte_cryptodev_stats *stats)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev *worker_dev =
				rte_cryptodev_pmd_get_dev(worker_dev_id);
		struct rte_cryptodev_stats worker_stats = {0};

		(*worker_dev->dev_ops->stats_get)(worker_dev, &worker_stats);

		stats->enqueued_count += worker_stats.enqueued_count;
		stats->dequeued_count += worker_stats.dequeued_count;

		stats->enqueue_err_count += worker_stats.enqueue_err_count;
		stats->dequeue_err_count += worker_stats.dequeue_err_count;
	}
}

/** Reset device statistics */
static void
scheduler_pmd_stats_reset(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev *worker_dev =
				rte_cryptodev_pmd_get_dev(worker_dev_id);

		(*worker_dev->dev_ops->stats_reset)(worker_dev);
	}
}

/** Get device info */
static void
scheduler_pmd_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t max_nb_sess = 0;
	uint16_t headroom_sz = 0;
	uint16_t tailroom_sz = 0;
	uint32_t i;

	if (!dev_info)
		return;

	/* scheduler_attach_init_worker() is called from several entry
	 * points, but only one call performs a meaningful attach.
	 */
	scheduler_attach_init_worker(dev);

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev_info worker_info;

		rte_cryptodev_info_get(worker_dev_id, &worker_info);
		uint32_t dev_max_sess = worker_info.sym.max_nb_sessions;
		if (dev_max_sess != 0) {
			if (max_nb_sess == 0 || dev_max_sess < max_nb_sess)
				max_nb_sess = worker_info.sym.max_nb_sessions;
		}

		/* Get the max headroom requirement among worker PMDs */
		headroom_sz = RTE_MAX(headroom_sz,
				worker_info.min_mbuf_headroom_req);

		/* Get the max tailroom requirement among worker PMDs */
		tailroom_sz = RTE_MAX(tailroom_sz,
				worker_info.min_mbuf_tailroom_req);
	}

	dev_info->driver_id = dev->driver_id;
	dev_info->feature_flags = dev->feature_flags;
	dev_info->capabilities = sched_ctx->capabilities;
	dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
	dev_info->min_mbuf_headroom_req = headroom_sz;
	dev_info->min_mbuf_tailroom_req = tailroom_sz;
	dev_info->sym.max_nb_sessions = max_nb_sess;
}

/** Release queue pair */
static int
scheduler_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (!qp_ctx)
		return 0;

	rte_ring_free(qp_ctx->order_ring);
	rte_free(qp_ctx->private_qp_ctx);

	rte_free(qp_ctx);
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
	const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx;
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
	uint32_t i;
	int ret;

	if (snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"CRYPTO_SCHE PMD %u QP %u",
			dev->data->dev_id, qp_id) < 0) {
		CR_SCHED_LOG(ERR, "Failed to create unique queue pair name");
		return -EFAULT;
	}

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		scheduler_pmd_qp_release(dev, qp_id);

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_id = sched_ctx->workers[i].dev_id;

		/*
		 * All workers will share the same session mempool
		 * for session-less operations, so the objects
		 * must be big enough for all the drivers used.
		 */
		ret = rte_cryptodev_queue_pair_setup(worker_id, qp_id,
				qp_conf, socket_id);
		if (ret < 0)
			return ret;
	}

	/* Allocate the queue pair data structure. */
	qp_ctx = rte_zmalloc_socket(name, sizeof(*qp_ctx), RTE_CACHE_LINE_SIZE,
			socket_id);
	if (qp_ctx == NULL)
		return -ENOMEM;

	/* The actual available object number = nb_descriptors - 1 */
	qp_ctx->max_nb_objs = qp_conf->nb_descriptors - 1;

	dev->data->queue_pairs[qp_id] = qp_ctx;

	/* scheduler_attach_init_worker() is called from several entry
	 * points, but only one call performs a meaningful attach.
	 */
	ret = scheduler_attach_init_worker(dev);
	if (ret < 0) {
		CR_SCHED_LOG(ERR, "Failed to attach worker");
		scheduler_pmd_qp_release(dev, qp_id);
		return ret;
	}

	if (*sched_ctx->ops.config_queue_pair) {
		if ((*sched_ctx->ops.config_queue_pair)(dev, qp_id) < 0) {
			CR_SCHED_LOG(ERR, "Unable to configure queue pair");
			return -1;
		}
	}

	return 0;
}

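/** Get the maximum symmetric session private data size across workers */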
static uint32_t
scheduler_pmd_sym_session_get_size(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;

	return scheduler_session_size_get(sched_ctx, RTE_CRYPTO_OP_WITH_SESSION);
}

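/** Configure a symmetric session on the scheduler and all its workers */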
static int
scheduler_pmd_sym_session_configure(struct rte_cryptodev *dev,
	struct rte_crypto_sym_xform *xform,
	struct rte_cryptodev_sym_session *sess)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;

	return scheduler_session_create(sess, xform, sched_ctx, RTE_CRYPTO_OP_WITH_SESSION);
}

/** Clear the memory of the session so it doesn't leave key material behind */
static void
scheduler_pmd_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;

	scheduler_session_destroy(sess, sched_ctx, RTE_CRYPTO_OP_WITH_SESSION);
}

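/** Operations exposed by the scheduler PMD to the cryptodev framework */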
static struct rte_cryptodev_ops scheduler_pmd_ops = {
		.dev_configure		= scheduler_pmd_config,
		.dev_start		= scheduler_pmd_start,
		.dev_stop		= scheduler_pmd_stop,
		.dev_close		= scheduler_pmd_close,

		.stats_get		= scheduler_pmd_stats_get,
		.stats_reset		= scheduler_pmd_stats_reset,

		.dev_infos_get		= scheduler_pmd_info_get,

		.queue_pair_setup	= scheduler_pmd_qp_setup,
		.queue_pair_release	= scheduler_pmd_qp_release,

		.sym_session_get_size	= scheduler_pmd_sym_session_get_size,
		.sym_session_configure	= scheduler_pmd_sym_session_configure,
		.sym_session_clear	= scheduler_pmd_sym_session_clear,
};

struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops;

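/*
 * Illustrative usage sketch (an editorial assumption, not part of this
 * driver): an application would typically attach workers and select a
 * scheduling mode via the public rte_cryptodev_scheduler API before
 * configuring and starting the device, e.g.:
 *
 *	rte_cryptodev_scheduler_worker_attach(scheduler_id, worker_id);
 *	rte_cryptodev_scheduler_mode_set(scheduler_id,
 *			CDEV_SCHED_MODE_ROUNDROBIN);
 *	rte_cryptodev_configure(scheduler_id, &config);
 *	rte_cryptodev_start(scheduler_id);
 *
 * The dev_configure and dev_start ops above then propagate each call to
 * every attached worker.
 */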
/** Configure a scheduler session from a security session configuration */
static int
scheduler_pmd_sec_sess_create(void *dev, struct rte_security_session_conf *conf,
			struct rte_security_session *sess)
{
	struct rte_cryptodev *cdev = dev;
	struct scheduler_ctx *sched_ctx = cdev->data->dev_private;

	/* Check for supported security protocols */
	if (!scheduler_check_sec_proto_supp(conf->action_type, conf->protocol)) {
		CR_SCHED_LOG(ERR, "Unsupported security protocol");
		return -ENOTSUP;
	}

	return scheduler_session_create(sess, conf, sched_ctx, RTE_CRYPTO_OP_SECURITY_SESSION);
}

/** Clear the memory of the session so it doesn't leave key material behind */
static int
scheduler_pmd_sec_sess_destroy(void *dev,
			       struct rte_security_session *sess)
{
	struct rte_cryptodev *cdev = dev;
	struct scheduler_ctx *sched_ctx = cdev->data->dev_private;

	scheduler_session_destroy(sess, sched_ctx, RTE_CRYPTO_OP_SECURITY_SESSION);

	return 0;
}

/** Get the security capabilities of the scheduler PMD */
static const struct rte_security_capability *
scheduler_pmd_sec_capa_get(void *dev)
{
	struct rte_cryptodev *cdev = dev;
	struct scheduler_ctx *sched_ctx = cdev->data->dev_private;

	return sched_ctx->sec_capabilities;
}

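/** Get the maximum security session private data size across workers */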
static unsigned int
scheduler_pmd_sec_sess_size_get(void *dev)
{
	struct rte_cryptodev *cdev = dev;
	struct scheduler_ctx *sched_ctx = cdev->data->dev_private;

	return scheduler_session_size_get(sched_ctx,
				RTE_CRYPTO_OP_SECURITY_SESSION);
}

static struct rte_security_ops scheduler_pmd_sec_ops = {
		.session_create = scheduler_pmd_sec_sess_create,
		.session_update = NULL,
		.session_get_size = scheduler_pmd_sec_sess_size_get,
		.session_stats_get = NULL,
		.session_destroy = scheduler_pmd_sec_sess_destroy,
		.set_pkt_metadata = NULL,
		.capabilities_get = scheduler_pmd_sec_capa_get
};

struct rte_security_ops *rte_crypto_scheduler_pmd_sec_ops =
							&scheduler_pmd_sec_ops;