/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */
#include <string.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <dev_driver.h>
#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_security_driver.h>
#include <rte_reorder.h>
#include <rte_errno.h>

#include "scheduler_pmd_private.h"

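/*
 * Bookkeeping entry describing a session already created on one worker.
 * Workers that share a driver need only one underlying session, so each
 * entry records the (dev_id, driver_id) pair and the session handle
 * (symmetric or security, depending on the session type).
 */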
struct scheduler_configured_sess_info {
	uint8_t dev_id;
	uint8_t driver_id;
	union {
		struct rte_cryptodev_sym_session *sess;
		struct {
			struct rte_security_session *sec_sess;
			struct rte_security_ctx *sec_ctx;
		};
	};
};

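/*
 * Create a session on each worker. Workers sharing a driver reuse the
 * session created for the first of them; on any failure, every session
 * created so far is freed or destroyed again.
 */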
static int
scheduler_session_create(void *sess, void *sess_params,
		struct scheduler_ctx *sched_ctx,
		enum rte_crypto_op_sess_type session_type)
{
	struct rte_mempool *mp = rte_mempool_from_obj(sess);
	struct scheduler_session_ctx *sess_ctx;
	struct scheduler_configured_sess_info configured_sess[
			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS] = {{0}};
	uint32_t i, j, n_configured_sess = 0;
	int ret = 0;

	if (session_type == RTE_CRYPTO_OP_WITH_SESSION)
		sess_ctx = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
	else
		sess_ctx = SECURITY_GET_SESS_PRIV(sess);

	if (mp == NULL)
		return -EINVAL;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		struct scheduler_worker *worker = &sched_ctx->workers[i];
		struct rte_cryptodev *dev = &rte_cryptodevs[worker->dev_id];
		uint8_t next_worker = 0;

		for (j = 0; j < n_configured_sess; j++) {
			if (configured_sess[j].driver_id == worker->driver_id) {
				if (session_type == RTE_CRYPTO_OP_WITH_SESSION)
					sess_ctx->worker_sess[i] =
						configured_sess[j].sess;
				else
					sess_ctx->worker_sec_sess[i] =
						configured_sess[j].sec_sess;

				next_worker = 1;
				break;
			}
		}
		if (next_worker)
			continue;

		if (rte_mempool_avail_count(mp) == 0) {
			ret = -ENOMEM;
			goto error_exit;
		}

		if (session_type == RTE_CRYPTO_OP_WITH_SESSION) {
			struct rte_cryptodev_sym_session *worker_sess =
				rte_cryptodev_sym_session_create(worker->dev_id,
						sess_params, mp);

			if (worker_sess == NULL) {
				ret = -rte_errno;
				goto error_exit;
			}

			worker_sess->opaque_data = (uint64_t)sess;
			sess_ctx->worker_sess[i] = worker_sess;
			configured_sess[n_configured_sess].sess = worker_sess;
		} else {
			struct rte_security_session *worker_sess =
				rte_security_session_create(dev->security_ctx,
						sess_params, mp);

			if (worker_sess == NULL) {
				ret = -rte_errno;
				goto error_exit;
			}

			worker_sess->opaque_data = (uint64_t)sess;
			sess_ctx->worker_sec_sess[i] = worker_sess;
			configured_sess[n_configured_sess].sec_sess =
							worker_sess;
			configured_sess[n_configured_sess].sec_ctx =
							dev->security_ctx;
		}

		configured_sess[n_configured_sess].driver_id =
							worker->driver_id;
		configured_sess[n_configured_sess].dev_id = worker->dev_id;
		n_configured_sess++;
	}

	return 0;

error_exit:
	sess_ctx->ref_cnt = sched_ctx->ref_cnt;
	for (i = 0; i < n_configured_sess; i++) {
		if (session_type == RTE_CRYPTO_OP_WITH_SESSION)
			rte_cryptodev_sym_session_free(
						configured_sess[i].dev_id,
						configured_sess[i].sess);
		else
			rte_security_session_destroy(
						configured_sess[i].sec_ctx,
						configured_sess[i].sec_sess);
	}

	return ret;
}

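/*
 * Destroy the per-worker sessions of a scheduler session. A session
 * shared by several workers is freed only once, on the first worker
 * that uses its driver; the remaining references are just cleared.
 */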
static void
scheduler_session_destroy(void *sess, struct scheduler_ctx *sched_ctx,
		uint8_t session_type)
{
	struct scheduler_session_ctx *sess_ctx;
	struct scheduler_configured_sess_info deleted_sess[
			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS] = {{0}};
	uint32_t i, j, n_deleted_sess = 0;

	if (session_type == RTE_CRYPTO_OP_WITH_SESSION)
		sess_ctx = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
	else
		sess_ctx = SECURITY_GET_SESS_PRIV(sess);

	if (sched_ctx->ref_cnt != sess_ctx->ref_cnt) {
		CR_SCHED_LOG(WARNING,
			"Workers changed between session creation and "
			"deletion; the session may not be fully freed.");
	}

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		struct scheduler_worker *worker = &sched_ctx->workers[i];
		struct rte_cryptodev *dev = &rte_cryptodevs[worker->dev_id];
		uint8_t next_worker = 0;

		for (j = 0; j < n_deleted_sess; j++) {
			if (deleted_sess[j].driver_id == worker->driver_id) {
				if (session_type == RTE_CRYPTO_OP_WITH_SESSION)
					sess_ctx->worker_sess[i] = NULL;
				else
					sess_ctx->worker_sec_sess[i] = NULL;

				next_worker = 1;
				break;
			}
		}
		if (next_worker)
			continue;

		if (session_type == RTE_CRYPTO_OP_WITH_SESSION) {
			rte_cryptodev_sym_session_free(worker->dev_id,
						sess_ctx->worker_sess[i]);
			sess_ctx->worker_sess[i] = NULL;
		} else {
			rte_security_session_destroy(dev->security_ctx,
						sess_ctx->worker_sec_sess[i]);
			sess_ctx->worker_sec_sess[i] = NULL;
		}

		deleted_sess[n_deleted_sess++].driver_id = worker->driver_id;
	}
}

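/*
 * Worker sessions are allocated from the same mempool as the scheduler
 * session object itself (see rte_mempool_from_obj() above), so report
 * the largest size needed: the scheduler's own per-session context or
 * the biggest worker private session, whichever is larger.
 */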
static unsigned int
scheduler_session_size_get(struct scheduler_ctx *sched_ctx,
		uint8_t session_type)
{
	uint8_t i = 0;
	uint32_t max_priv_sess_size = sizeof(struct scheduler_session_ctx);

	/* Find the maximum private session size among all workers */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev *dev = &rte_cryptodevs[worker_dev_id];
		struct rte_security_ctx *sec_ctx = dev->security_ctx;
		uint32_t priv_sess_size = 0;

		if (session_type == RTE_CRYPTO_OP_WITH_SESSION) {
			priv_sess_size =
				(*dev->dev_ops->sym_session_get_size)(dev);
		} else {
			priv_sess_size = (*sec_ctx->ops->session_get_size)(dev);
		}

		max_priv_sess_size = RTE_MAX(max_priv_sess_size, priv_sess_size);
	}

	return max_priv_sess_size;
}

/** Attach the workers predefined in the scheduler's EAL options */
static int
scheduler_attach_init_worker(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint8_t scheduler_id = dev->data->dev_id;
	int i;

	for (i = sched_ctx->nb_init_workers - 1; i >= 0; i--) {
		const char *dev_name = sched_ctx->init_worker_names[i];
		struct rte_cryptodev *worker_dev =
				rte_cryptodev_pmd_get_named_dev(dev_name);
		int status;

		if (!worker_dev) {
			CR_SCHED_LOG(ERR, "Failed to locate worker dev %s",
					dev_name);
			return -EINVAL;
		}

		status = rte_cryptodev_scheduler_worker_attach(
				scheduler_id, worker_dev->data->dev_id);

		if (status < 0) {
			CR_SCHED_LOG(ERR, "Failed to attach worker cryptodev %u",
					worker_dev->data->dev_id);
			return status;
		}

		CR_SCHED_LOG(INFO, "Scheduler %s attached worker %s",
				dev->data->name,
				sched_ctx->init_worker_names[i]);

		rte_free(sched_ctx->init_worker_names[i]);
		sched_ctx->init_worker_names[i] = NULL;

		sched_ctx->nb_init_workers -= 1;
	}

	return 0;
}
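
/*
 * For illustration (device names here are examples, not defaults), the
 * init workers consumed above come from EAL vdev options such as:
 *
 *   --vdev crypto_aesni_mb0 \
 *   --vdev "crypto_scheduler,worker=crypto_aesni_mb0,mode=round-robin"
 */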
/** Configure device */
static int
scheduler_pmd_config(struct rte_cryptodev *dev,
		struct rte_cryptodev_config *config)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	/* scheduler_attach_init_worker() may be called multiple times, but
	 * only one invocation does meaningful work.
	 */
	ret = scheduler_attach_init_worker(dev);
	if (ret < 0)
		return ret;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;

		ret = rte_cryptodev_configure(worker_dev_id, config);
		if (ret < 0)
			break;
	}

	return ret;
}

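/*
 * (Re)create the reorder ring of one queue pair. The ring is only kept
 * while reordering is enabled; its size is the number of workers times
 * PER_WORKER_BUFF_SIZE, rounded up to the power of two that rte_ring
 * requires.
 */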
static int
update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (sched_ctx->reordering_enabled) {
		char order_ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
		uint32_t buff_size = rte_align32pow2(
			sched_ctx->nb_workers * PER_WORKER_BUFF_SIZE);

		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}

		if (!buff_size)
			return 0;

		if (snprintf(order_ring_name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
			dev->data->dev_id, qp_id) < 0) {
			CR_SCHED_LOG(ERR, "failed to create unique reorder "
					"buffer name");
			return -ENOMEM;
		}

		qp_ctx->order_ring = rte_ring_create(order_ring_name,
				buff_size, rte_socket_id(),
				RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (!qp_ctx->order_ring) {
			CR_SCHED_LOG(ERR, "failed to create order ring");
			return -ENOMEM;
		}
	} else {
		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}
	}

	return 0;
}

/** Start device */
static int
scheduler_pmd_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	if (dev->data->dev_started)
		return 0;

	/* scheduler_attach_init_worker() may be called multiple times, but
	 * only one invocation does meaningful work.
	 */
	ret = scheduler_attach_init_worker(dev);
	if (ret < 0)
		return ret;

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = update_order_ring(dev, i);
		if (ret < 0) {
			CR_SCHED_LOG(ERR, "Failed to update reorder buffer");
			return ret;
		}
	}

	if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) {
		CR_SCHED_LOG(ERR, "Scheduler mode is not set");
		return -1;
	}

	if (!sched_ctx->nb_workers) {
		CR_SCHED_LOG(ERR, "No worker in the scheduler");
		return -1;
	}

	if (*sched_ctx->ops.worker_attach == NULL)
		return -ENOTSUP;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;

		if ((*sched_ctx->ops.worker_attach)(dev, worker_dev_id) < 0) {
			CR_SCHED_LOG(ERR, "Failed to attach worker");
			return -ENOTSUP;
		}
	}

	if (*sched_ctx->ops.scheduler_start == NULL)
		return -ENOTSUP;

	if ((*sched_ctx->ops.scheduler_start)(dev) < 0) {
		CR_SCHED_LOG(ERR, "Scheduler start failed");
		return -1;
	}

	/* start all workers */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		ret = rte_cryptodev_start(worker_dev_id);
		if (ret < 0) {
			CR_SCHED_LOG(ERR, "Failed to start worker dev %u",
					worker_dev_id);
			return ret;
		}
	}

	return 0;
}

/** Stop device */
static void
scheduler_pmd_stop(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	if (!dev->data->dev_started)
		return;

	/* stop all workers first */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;

		rte_cryptodev_stop(worker_dev_id);
	}

	if (*sched_ctx->ops.scheduler_stop)
		(*sched_ctx->ops.scheduler_stop)(dev);

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;

		if (*sched_ctx->ops.worker_detach)
			(*sched_ctx->ops.worker_detach)(dev, worker_dev_id);
	}
}

/** Close device */
static int
scheduler_pmd_close(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	/* the dev should be stopped before being closed */
	if (dev->data->dev_started)
		return -EBUSY;

	/* close all workers first */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev *worker_dev =
				rte_cryptodev_pmd_get_dev(worker_dev_id);

		ret = (*worker_dev->dev_ops->dev_close)(worker_dev);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];

		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}

		if (qp_ctx->private_qp_ctx) {
			rte_free(qp_ctx->private_qp_ctx);
			qp_ctx->private_qp_ctx = NULL;
		}
	}

	if (sched_ctx->private_ctx) {
		rte_free(sched_ctx->private_ctx);
		sched_ctx->private_ctx = NULL;
	}

	scheduler_free_capabilities(sched_ctx);

	return 0;
}

/** Get device statistics */
static void
scheduler_pmd_stats_get(struct rte_cryptodev *dev,
	struct rte_cryptodev_stats *stats)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev *worker_dev =
				rte_cryptodev_pmd_get_dev(worker_dev_id);
		struct rte_cryptodev_stats worker_stats = {0};

		(*worker_dev->dev_ops->stats_get)(worker_dev, &worker_stats);

		stats->enqueued_count += worker_stats.enqueued_count;
		stats->dequeued_count += worker_stats.dequeued_count;

		stats->enqueue_err_count += worker_stats.enqueue_err_count;
		stats->dequeue_err_count += worker_stats.dequeue_err_count;
	}
}

/** Reset device statistics */
static void
scheduler_pmd_stats_reset(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev *worker_dev =
				rte_cryptodev_pmd_get_dev(worker_dev_id);

		(*worker_dev->dev_ops->stats_reset)(worker_dev);
	}
}

/** Get device info */
static void
scheduler_pmd_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t max_nb_sess = 0;
	uint16_t headroom_sz = 0;
	uint16_t tailroom_sz = 0;
	uint32_t i;

	if (!dev_info)
		return;

	/* scheduler_attach_init_worker() may be called multiple times, but
	 * only one invocation does meaningful work.
	 */
	scheduler_attach_init_worker(dev);

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev_info worker_info;

		rte_cryptodev_info_get(worker_dev_id, &worker_info);
		uint32_t dev_max_sess = worker_info.sym.max_nb_sessions;
		if (dev_max_sess != 0) {
			if (max_nb_sess == 0 || dev_max_sess < max_nb_sess)
				max_nb_sess = worker_info.sym.max_nb_sessions;
		}

		/* Get the max headroom requirement among worker PMDs */
		headroom_sz = RTE_MAX(headroom_sz,
				worker_info.min_mbuf_headroom_req);

		/* Get the max tailroom requirement among worker PMDs */
		tailroom_sz = RTE_MAX(tailroom_sz,
				worker_info.min_mbuf_tailroom_req);
	}

	dev_info->driver_id = dev->driver_id;
	dev_info->feature_flags = dev->feature_flags;
	dev_info->capabilities = sched_ctx->capabilities;
	dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
	dev_info->min_mbuf_headroom_req = headroom_sz;
	dev_info->min_mbuf_tailroom_req = tailroom_sz;
	dev_info->sym.max_nb_sessions = max_nb_sess;
}

/** Release queue pair */
static int
scheduler_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (!qp_ctx)
		return 0;

	rte_ring_free(qp_ctx->order_ring);
	rte_free(qp_ctx->private_qp_ctx);

	rte_free(qp_ctx);
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

/** Setup a queue pair */
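/*
 * The same qp_id is configured on every worker below, so each worker
 * must support at least as many queue pairs as the scheduler exposes.
 */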
static int
scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
	const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx;
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
	uint32_t i;
	int ret;

	if (snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"CRYPTO_SCHE PMD %u QP %u",
			dev->data->dev_id, qp_id) < 0) {
		CR_SCHED_LOG(ERR, "Failed to create unique queue pair name");
		return -EFAULT;
	}

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		scheduler_pmd_qp_release(dev, qp_id);

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_id = sched_ctx->workers[i].dev_id;

		/*
		 * All workers will share the same session mempool
		 * for session-less operations, so the objects
		 * must be big enough for all the drivers used.
		 */
		ret = rte_cryptodev_queue_pair_setup(worker_id, qp_id,
				qp_conf, socket_id);
		if (ret < 0)
			return ret;
	}

	/* Allocate the queue pair data structure. */
	qp_ctx = rte_zmalloc_socket(name, sizeof(*qp_ctx), RTE_CACHE_LINE_SIZE,
			socket_id);
	if (qp_ctx == NULL)
		return -ENOMEM;

	/* The actual available object number = nb_descriptors - 1 */
	qp_ctx->max_nb_objs = qp_conf->nb_descriptors - 1;

	dev->data->queue_pairs[qp_id] = qp_ctx;

	/* scheduler_attach_init_worker() may be called multiple times, but
	 * only one invocation does meaningful work.
	 */
	ret = scheduler_attach_init_worker(dev);
	if (ret < 0) {
		CR_SCHED_LOG(ERR, "Failed to attach worker");
		scheduler_pmd_qp_release(dev, qp_id);
		return ret;
	}

	if (*sched_ctx->ops.config_queue_pair) {
		if ((*sched_ctx->ops.config_queue_pair)(dev, qp_id) < 0) {
			CR_SCHED_LOG(ERR, "Unable to configure queue pair");
			return -1;
		}
	}

	return 0;
}

static uint32_t
scheduler_pmd_sym_session_get_size(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;

	return scheduler_session_size_get(sched_ctx, RTE_CRYPTO_OP_WITH_SESSION);
}

static int
scheduler_pmd_sym_session_configure(struct rte_cryptodev *dev,
	struct rte_crypto_sym_xform *xform,
	struct rte_cryptodev_sym_session *sess)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;

	return scheduler_session_create(sess, xform, sched_ctx, RTE_CRYPTO_OP_WITH_SESSION);
}

/** Clear the session memory so it doesn't leave key material behind */
static void
scheduler_pmd_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;

	scheduler_session_destroy(sess, sched_ctx, RTE_CRYPTO_OP_WITH_SESSION);
}

static struct rte_cryptodev_ops scheduler_pmd_ops = {
		.dev_configure		= scheduler_pmd_config,
		.dev_start		= scheduler_pmd_start,
		.dev_stop		= scheduler_pmd_stop,
		.dev_close		= scheduler_pmd_close,

		.stats_get		= scheduler_pmd_stats_get,
		.stats_reset		= scheduler_pmd_stats_reset,

		.dev_infos_get		= scheduler_pmd_info_get,

		.queue_pair_setup	= scheduler_pmd_qp_setup,
		.queue_pair_release	= scheduler_pmd_qp_release,

		.sym_session_get_size	= scheduler_pmd_sym_session_get_size,
		.sym_session_configure	= scheduler_pmd_sym_session_configure,
		.sym_session_clear	= scheduler_pmd_sym_session_clear,
};

struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops;
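
/* Exported for the scheduler's device creation code (see scheduler_pmd.c),
 * which installs this table on the cryptodev instance it creates; the
 * security ops below are exported the same way.
 */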

/** Configure a scheduler session from a security session configuration */
static int
scheduler_pmd_sec_sess_create(void *dev, struct rte_security_session_conf *conf,
			struct rte_security_session *sess)
{
	struct rte_cryptodev *cdev = dev;
	struct scheduler_ctx *sched_ctx = cdev->data->dev_private;

	/* Check for supported security protocols */
	if (!scheduler_check_sec_proto_supp(conf->action_type, conf->protocol)) {
		CR_SCHED_LOG(ERR, "Unsupported security protocol");
		return -ENOTSUP;
	}

	return scheduler_session_create(sess, conf, sched_ctx, RTE_CRYPTO_OP_SECURITY_SESSION);
}

/** Clear the session memory so it doesn't leave key material behind */
static int
scheduler_pmd_sec_sess_destroy(void *dev,
			       struct rte_security_session *sess)
{
	struct rte_cryptodev *cdev = dev;
	struct scheduler_ctx *sched_ctx = cdev->data->dev_private;

	scheduler_session_destroy(sess, sched_ctx, RTE_CRYPTO_OP_SECURITY_SESSION);

	return 0;
}

/** Get security capabilities of the scheduler PMD */
static const struct rte_security_capability *
scheduler_pmd_sec_capa_get(void *dev)
{
	struct rte_cryptodev *cdev = dev;
	struct scheduler_ctx *sched_ctx = cdev->data->dev_private;

	return sched_ctx->sec_capabilities;
}

static unsigned int
scheduler_pmd_sec_sess_size_get(void *dev)
{
	struct rte_cryptodev *cdev = dev;
	struct scheduler_ctx *sched_ctx = cdev->data->dev_private;

	return scheduler_session_size_get(sched_ctx,
				RTE_CRYPTO_OP_SECURITY_SESSION);
}

static struct rte_security_ops scheduler_pmd_sec_ops = {
		.session_create = scheduler_pmd_sec_sess_create,
		.session_update = NULL,
		.session_get_size = scheduler_pmd_sec_sess_size_get,
		.session_stats_get = NULL,
		.session_destroy = scheduler_pmd_sec_sess_destroy,
		.set_pkt_metadata = NULL,
		.capabilities_get = scheduler_pmd_sec_capa_get
};

struct rte_security_ops *rte_crypto_scheduler_pmd_sec_ops =
							&scheduler_pmd_sec_ops;