xref: /dpdk/drivers/crypto/scheduler/rte_cryptodev_scheduler.c (revision e2af4e403c15b9de0d692288bbea866e981dba4d)
15566a3e3SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
25566a3e3SBruce Richardson  * Copyright(c) 2017 Intel Corporation
331439ee7SFan Zhang  */
46723c0fcSBruce Richardson #include <rte_string_fns.h>
531439ee7SFan Zhang #include <rte_reorder.h>
631439ee7SFan Zhang #include <rte_cryptodev.h>
7af668035SAkhil Goyal #include <cryptodev_pmd.h>
8*e2af4e40SDavid Coyle #include <rte_security_driver.h>
931439ee7SFan Zhang #include <rte_malloc.h>
1031439ee7SFan Zhang 
11b88161beSBruce Richardson #include "rte_cryptodev_scheduler.h"
1231439ee7SFan Zhang #include "scheduler_pmd_private.h"
1331439ee7SFan Zhang 
14*e2af4e40SDavid Coyle #define MAX_CAPS 256
15*e2af4e40SDavid Coyle 
1631439ee7SFan Zhang /** update the scheduler pmd's capability with attaching device's
1731439ee7SFan Zhang  *  capability.
1831439ee7SFan Zhang  *  For each device to be attached, the scheduler's capability should be
1985b00824SAdam Dybkowski  *  the common capability set of all workers
2031439ee7SFan Zhang  **/
static uint32_t
sync_caps(struct rte_cryptodev_capabilities *caps,
		uint32_t nb_caps,
		const struct rte_cryptodev_capabilities *worker_caps)
{
	uint32_t sync_nb_caps = nb_caps, nb_worker_caps = 0;
	uint32_t i;

	/* Count the worker's capabilities; the array is terminated by an
	 * entry whose op is RTE_CRYPTO_OP_TYPE_UNDEFINED.
	 */
	while (worker_caps[nb_worker_caps].op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
		nb_worker_caps++;

	/* First worker attached: adopt its capability set verbatim.
	 * NOTE(review): no bound against the caller's buffer size here —
	 * presumably nb_worker_caps <= MAX_CAPS; verify against callers.
	 */
	if (nb_caps == 0) {
		rte_memcpy(caps, worker_caps, sizeof(*caps) * nb_worker_caps);
		return nb_worker_caps;
	}

	/* Intersect: keep each existing cap only if the worker also
	 * advertises a compatible one, merging value ranges where needed.
	 */
	for (i = 0; i < sync_nb_caps; i++) {
		struct rte_cryptodev_capabilities *cap = &caps[i];
		uint32_t j;

		for (j = 0; j < nb_worker_caps; j++) {
			const struct rte_cryptodev_capabilities *s_cap =
					&worker_caps[j];

			/* Skip worker caps of a different op or xform type. */
			if (s_cap->op != cap->op || s_cap->sym.xform_type !=
					cap->sym.xform_type)
				continue;

			if (s_cap->sym.xform_type ==
					RTE_CRYPTO_SYM_XFORM_AUTH) {
				if (s_cap->sym.auth.algo !=
						cap->sym.auth.algo)
					continue;

				/* Narrow the digest-size range to what both
				 * the scheduler and the worker support.
				 */
				cap->sym.auth.digest_size.min =
					s_cap->sym.auth.digest_size.min <
					cap->sym.auth.digest_size.min ?
					s_cap->sym.auth.digest_size.min :
					cap->sym.auth.digest_size.min;
				cap->sym.auth.digest_size.max =
					s_cap->sym.auth.digest_size.max <
					cap->sym.auth.digest_size.max ?
					s_cap->sym.auth.digest_size.max :
					cap->sym.auth.digest_size.max;
			}

			if (s_cap->sym.xform_type ==
					RTE_CRYPTO_SYM_XFORM_CIPHER)
				if (s_cap->sym.cipher.algo !=
						cap->sym.cipher.algo)
					continue;

			/* Matching worker capability found — stop searching.
			 * NOTE(review): xform types other than AUTH/CIPHER
			 * (e.g. AEAD) are treated as matching on op + xform
			 * type alone, without comparing algorithms — confirm
			 * this is intentional.
			 */
			break;
		}

		/* Broke out of the inner loop early => a common cap was
		 * found; keep caps[i] as-is.
		 */
		if (j < nb_worker_caps)
			continue;

		/* No worker match: remove the uncommon cap by shifting the
		 * tail down one slot, then zero the vacated last entry.
		 */
		for (j = i; j < sync_nb_caps - 1; j++)
			rte_memcpy(&caps[j], &caps[j+1], sizeof(*cap));

		memset(&caps[sync_nb_caps - 1], 0, sizeof(*cap));
		sync_nb_caps--;
		i--;	/* re-examine the entry shifted into slot i */
	}

	return sync_nb_caps;
}
9131439ee7SFan Zhang 
9231439ee7SFan Zhang static int
check_sec_cap_equal(const struct rte_security_capability * sec_cap1,struct rte_security_capability * sec_cap2)93*e2af4e40SDavid Coyle check_sec_cap_equal(const struct rte_security_capability *sec_cap1,
94*e2af4e40SDavid Coyle 		struct rte_security_capability *sec_cap2)
9531439ee7SFan Zhang {
96*e2af4e40SDavid Coyle 	if (sec_cap1->action != sec_cap2->action ||
97*e2af4e40SDavid Coyle 			sec_cap1->protocol != sec_cap2->protocol ||
98*e2af4e40SDavid Coyle 			sec_cap1->ol_flags != sec_cap2->ol_flags)
99*e2af4e40SDavid Coyle 		return 0;
10031439ee7SFan Zhang 
101*e2af4e40SDavid Coyle 	if (sec_cap1->protocol == RTE_SECURITY_PROTOCOL_DOCSIS)
102*e2af4e40SDavid Coyle 		return !memcmp(&sec_cap1->docsis, &sec_cap2->docsis,
103*e2af4e40SDavid Coyle 				sizeof(sec_cap1->docsis));
104*e2af4e40SDavid Coyle 	else
105*e2af4e40SDavid Coyle 		return 0;
10606f0a569SPablo de Lara }
10731439ee7SFan Zhang 
108*e2af4e40SDavid Coyle static void
copy_sec_cap(struct rte_security_capability * dst_sec_cap,struct rte_security_capability * src_sec_cap)109*e2af4e40SDavid Coyle copy_sec_cap(struct rte_security_capability *dst_sec_cap,
110*e2af4e40SDavid Coyle 		struct rte_security_capability *src_sec_cap)
111*e2af4e40SDavid Coyle {
112*e2af4e40SDavid Coyle 	dst_sec_cap->action = src_sec_cap->action;
113*e2af4e40SDavid Coyle 	dst_sec_cap->protocol = src_sec_cap->protocol;
114*e2af4e40SDavid Coyle 	if (src_sec_cap->protocol == RTE_SECURITY_PROTOCOL_DOCSIS)
115*e2af4e40SDavid Coyle 		dst_sec_cap->docsis = src_sec_cap->docsis;
116*e2af4e40SDavid Coyle 	dst_sec_cap->ol_flags = src_sec_cap->ol_flags;
117*e2af4e40SDavid Coyle }
118*e2af4e40SDavid Coyle 
/* Intersect the crypto capabilities attached to a security capability with
 * those of a newly attached worker.
 *
 * @param tmp_sec_crypto_caps  output array receiving the intersection
 * @param sec_crypto_caps      scheduler's current synced crypto caps
 * @param worker_sec_crypto_caps  worker's crypto caps for the same sec cap
 * @return number of capabilities in the final intersection
 */
static uint32_t
sync_sec_crypto_caps(struct rte_cryptodev_capabilities *tmp_sec_crypto_caps,
		const struct rte_cryptodev_capabilities *sec_crypto_caps,
		const struct rte_cryptodev_capabilities *worker_sec_crypto_caps)
{
	/* uint32_t (not uint8_t): sync_caps() returns uint32_t and a count
	 * of MAX_CAPS (256) would truncate to 0 in an 8-bit counter.
	 */
	uint32_t nb_caps = 0;

	/* Seed with the scheduler's current caps, then intersect with the
	 * worker's. The second pass can shrink the set, so its return value
	 * must be kept — discarding it would overstate the final count.
	 */
	nb_caps = sync_caps(tmp_sec_crypto_caps, nb_caps, sec_crypto_caps);
	nb_caps = sync_caps(tmp_sec_crypto_caps, nb_caps,
			worker_sec_crypto_caps);

	return nb_caps;
}
131*e2af4e40SDavid Coyle 
132*e2af4e40SDavid Coyle /** update the scheduler pmd's security capability with attaching device's
133*e2af4e40SDavid Coyle  *  security capability.
134*e2af4e40SDavid Coyle  *  For each device to be attached, the scheduler's security capability should
135*e2af4e40SDavid Coyle  *  be the common capability set of all workers
136*e2af4e40SDavid Coyle  **/
137*e2af4e40SDavid Coyle static uint32_t
sync_sec_caps(uint32_t worker_idx,struct rte_security_capability * sec_caps,struct rte_cryptodev_capabilities sec_crypto_caps[][MAX_CAPS],uint32_t nb_sec_caps,const struct rte_security_capability * worker_sec_caps)138*e2af4e40SDavid Coyle sync_sec_caps(uint32_t worker_idx,
139*e2af4e40SDavid Coyle 		struct rte_security_capability *sec_caps,
140*e2af4e40SDavid Coyle 		struct rte_cryptodev_capabilities sec_crypto_caps[][MAX_CAPS],
141*e2af4e40SDavid Coyle 		uint32_t nb_sec_caps,
142*e2af4e40SDavid Coyle 		const struct rte_security_capability *worker_sec_caps)
143*e2af4e40SDavid Coyle {
144*e2af4e40SDavid Coyle 	uint32_t nb_worker_sec_caps = 0, i;
145*e2af4e40SDavid Coyle 
146*e2af4e40SDavid Coyle 	if (worker_sec_caps == NULL)
147*e2af4e40SDavid Coyle 		return 0;
148*e2af4e40SDavid Coyle 
149*e2af4e40SDavid Coyle 	while (worker_sec_caps[nb_worker_sec_caps].action !=
150*e2af4e40SDavid Coyle 					RTE_SECURITY_ACTION_TYPE_NONE)
151*e2af4e40SDavid Coyle 		nb_worker_sec_caps++;
152*e2af4e40SDavid Coyle 
153*e2af4e40SDavid Coyle 	/* Handle first worker */
154*e2af4e40SDavid Coyle 	if (worker_idx == 0) {
155*e2af4e40SDavid Coyle 		uint32_t nb_worker_sec_crypto_caps = 0;
156*e2af4e40SDavid Coyle 		uint32_t nb_worker_supp_sec_caps = 0;
157*e2af4e40SDavid Coyle 
158*e2af4e40SDavid Coyle 		for (i = 0; i < nb_worker_sec_caps; i++) {
159*e2af4e40SDavid Coyle 			/* Check for supported security protocols */
160*e2af4e40SDavid Coyle 			if (!scheduler_check_sec_proto_supp(worker_sec_caps[i].action,
161*e2af4e40SDavid Coyle 					worker_sec_caps[i].protocol))
162*e2af4e40SDavid Coyle 				continue;
163*e2af4e40SDavid Coyle 
164*e2af4e40SDavid Coyle 			sec_caps[nb_worker_supp_sec_caps] = worker_sec_caps[i];
165*e2af4e40SDavid Coyle 
166*e2af4e40SDavid Coyle 			while (worker_sec_caps[i].crypto_capabilities[
167*e2af4e40SDavid Coyle 					nb_worker_sec_crypto_caps].op !=
168*e2af4e40SDavid Coyle 						RTE_CRYPTO_OP_TYPE_UNDEFINED)
169*e2af4e40SDavid Coyle 				nb_worker_sec_crypto_caps++;
170*e2af4e40SDavid Coyle 
171*e2af4e40SDavid Coyle 			rte_memcpy(&sec_crypto_caps[nb_worker_supp_sec_caps][0],
172*e2af4e40SDavid Coyle 				&worker_sec_caps[i].crypto_capabilities[0],
173*e2af4e40SDavid Coyle 				sizeof(sec_crypto_caps[nb_worker_supp_sec_caps][0]) *
174*e2af4e40SDavid Coyle 					nb_worker_sec_crypto_caps);
175*e2af4e40SDavid Coyle 
176*e2af4e40SDavid Coyle 			nb_worker_supp_sec_caps++;
177*e2af4e40SDavid Coyle 		}
178*e2af4e40SDavid Coyle 		return nb_worker_supp_sec_caps;
179*e2af4e40SDavid Coyle 	}
180*e2af4e40SDavid Coyle 
181*e2af4e40SDavid Coyle 	for (i = 0; i < nb_sec_caps; i++) {
182*e2af4e40SDavid Coyle 		struct rte_security_capability *sec_cap = &sec_caps[i];
183*e2af4e40SDavid Coyle 		uint32_t j;
184*e2af4e40SDavid Coyle 
185*e2af4e40SDavid Coyle 		for (j = 0; j < nb_worker_sec_caps; j++) {
186*e2af4e40SDavid Coyle 			struct rte_cryptodev_capabilities
187*e2af4e40SDavid Coyle 					tmp_sec_crypto_caps[MAX_CAPS] = { {0} };
188*e2af4e40SDavid Coyle 			uint32_t nb_sec_crypto_caps = 0;
189*e2af4e40SDavid Coyle 			const struct rte_security_capability *worker_sec_cap =
190*e2af4e40SDavid Coyle 								&worker_sec_caps[j];
191*e2af4e40SDavid Coyle 
192*e2af4e40SDavid Coyle 			if (!check_sec_cap_equal(worker_sec_cap, sec_cap))
193*e2af4e40SDavid Coyle 				continue;
194*e2af4e40SDavid Coyle 
195*e2af4e40SDavid Coyle 			/* Sync the crypto caps of the common security cap */
196*e2af4e40SDavid Coyle 			nb_sec_crypto_caps = sync_sec_crypto_caps(
197*e2af4e40SDavid Coyle 						tmp_sec_crypto_caps,
198*e2af4e40SDavid Coyle 						&sec_crypto_caps[i][0],
199*e2af4e40SDavid Coyle 						&worker_sec_cap->crypto_capabilities[0]);
200*e2af4e40SDavid Coyle 
201*e2af4e40SDavid Coyle 			memset(&sec_crypto_caps[i][0], 0,
202*e2af4e40SDavid Coyle 					sizeof(sec_crypto_caps[i][0]) * MAX_CAPS);
203*e2af4e40SDavid Coyle 
204*e2af4e40SDavid Coyle 			rte_memcpy(&sec_crypto_caps[i][0],
205*e2af4e40SDavid Coyle 					&tmp_sec_crypto_caps[0],
206*e2af4e40SDavid Coyle 					sizeof(sec_crypto_caps[i][0]) * nb_sec_crypto_caps);
207*e2af4e40SDavid Coyle 
208*e2af4e40SDavid Coyle 			break;
209*e2af4e40SDavid Coyle 		}
210*e2af4e40SDavid Coyle 
211*e2af4e40SDavid Coyle 		if (j < nb_worker_sec_caps)
212*e2af4e40SDavid Coyle 			continue;
213*e2af4e40SDavid Coyle 
214*e2af4e40SDavid Coyle 		/*
215*e2af4e40SDavid Coyle 		 * Remove an uncommon security cap, and it's associated crypto
216*e2af4e40SDavid Coyle 		 * caps, from the arrays
217*e2af4e40SDavid Coyle 		 */
218*e2af4e40SDavid Coyle 		for (j = i; j < nb_sec_caps - 1; j++) {
219*e2af4e40SDavid Coyle 			rte_memcpy(&sec_caps[j], &sec_caps[j+1],
220*e2af4e40SDavid Coyle 					sizeof(*sec_cap));
221*e2af4e40SDavid Coyle 
222*e2af4e40SDavid Coyle 			rte_memcpy(&sec_crypto_caps[j][0],
223*e2af4e40SDavid Coyle 					&sec_crypto_caps[j+1][0],
224*e2af4e40SDavid Coyle 					sizeof(*&sec_crypto_caps[j][0]) *
225*e2af4e40SDavid Coyle 						MAX_CAPS);
226*e2af4e40SDavid Coyle 		}
227*e2af4e40SDavid Coyle 		memset(&sec_caps[nb_sec_caps - 1], 0, sizeof(*sec_cap));
228*e2af4e40SDavid Coyle 		memset(&sec_crypto_caps[nb_sec_caps - 1][0], 0,
229*e2af4e40SDavid Coyle 			sizeof(*&sec_crypto_caps[nb_sec_caps - 1][0]) *
230*e2af4e40SDavid Coyle 				MAX_CAPS);
231*e2af4e40SDavid Coyle 		nb_sec_caps--;
232*e2af4e40SDavid Coyle 		i--;
233*e2af4e40SDavid Coyle 	}
234*e2af4e40SDavid Coyle 
235*e2af4e40SDavid Coyle 	return nb_sec_caps;
236*e2af4e40SDavid Coyle }
237*e2af4e40SDavid Coyle 
/* Rebuild the scheduler's cryptodev and security capability arrays as the
 * intersection of all attached workers' capabilities.
 *
 * Returns 0 on success, -1 if the workers share no common crypto caps,
 * or -ENOMEM on allocation failure. On failure, partially built arrays
 * are left in sched_ctx; callers free them via scheduler_free_capabilities
 * (see rte_cryptodev_scheduler_worker_attach/detach).
 */
static int
update_scheduler_capability(struct scheduler_ctx *sched_ctx)
{
	struct rte_cryptodev_capabilities tmp_caps[MAX_CAPS] = { {0} };
	struct rte_security_capability tmp_sec_caps[MAX_CAPS] = { {0} };
	struct rte_cryptodev_capabilities
		tmp_sec_crypto_caps[MAX_CAPS][MAX_CAPS] = { {{0}} };
	uint32_t nb_caps = 0, nb_sec_caps = 0, i;
	struct rte_cryptodev_info dev_info;

	/* Free any previously allocated capability memory */
	scheduler_free_capabilities(sched_ctx);

	/* Determine the new cryptodev capabilities for the scheduler */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		rte_cryptodev_info_get(sched_ctx->workers[i].dev_id, &dev_info);

		nb_caps = sync_caps(tmp_caps, nb_caps, dev_info.capabilities);
		/* An empty intersection means the workers are incompatible. */
		if (nb_caps == 0)
			return -1;
	}

	/* +1 slot for the zeroed terminator entry. */
	sched_ctx->capabilities = rte_zmalloc_socket(NULL,
			sizeof(struct rte_cryptodev_capabilities) *
			(nb_caps + 1), 0, SOCKET_ID_ANY);
	if (!sched_ctx->capabilities)
		return -ENOMEM;

	rte_memcpy(sched_ctx->capabilities, tmp_caps,
			sizeof(struct rte_cryptodev_capabilities) * nb_caps);

	/* Determine the new security capabilities for the scheduler */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		struct rte_cryptodev *dev =
				&rte_cryptodevs[sched_ctx->workers[i].dev_id];
		struct rte_security_ctx *sec_ctx = dev->security_ctx;

		/* sync_sec_caps() tolerates a NULL caps pointer (worker
		 * without security support), returning 0.
		 */
		nb_sec_caps = sync_sec_caps(i, tmp_sec_caps, tmp_sec_crypto_caps,
			nb_sec_caps, rte_security_capabilities_get(sec_ctx));
	}

	sched_ctx->sec_capabilities = rte_zmalloc_socket(NULL,
					sizeof(struct rte_security_capability) *
					(nb_sec_caps + 1), 0, SOCKET_ID_ANY);
	if (!sched_ctx->sec_capabilities)
		return -ENOMEM;

	/* Array of per-security-cap crypto-capability arrays (+1 for a
	 * NULL terminator slot).
	 */
	sched_ctx->sec_crypto_capabilities = rte_zmalloc_socket(NULL,
				sizeof(struct rte_cryptodev_capabilities *) *
				(nb_sec_caps + 1),
				0, SOCKET_ID_ANY);
	if (!sched_ctx->sec_crypto_capabilities)
		return -ENOMEM;

	for (i = 0; i < nb_sec_caps; i++) {
		uint16_t nb_sec_crypto_caps = 0;

		copy_sec_cap(&sched_ctx->sec_capabilities[i], &tmp_sec_caps[i]);

		while (tmp_sec_crypto_caps[i][nb_sec_crypto_caps].op !=
						RTE_CRYPTO_OP_TYPE_UNDEFINED)
			nb_sec_crypto_caps++;

		sched_ctx->sec_crypto_capabilities[i] =
			rte_zmalloc_socket(NULL,
				sizeof(struct rte_cryptodev_capabilities) *
				(nb_sec_crypto_caps + 1), 0, SOCKET_ID_ANY);
		if (!sched_ctx->sec_crypto_capabilities[i])
			return -ENOMEM;

		rte_memcpy(sched_ctx->sec_crypto_capabilities[i],
				&tmp_sec_crypto_caps[i][0],
				sizeof(struct rte_cryptodev_capabilities)
					* nb_sec_crypto_caps);

		/* Point the public security cap at its synced crypto caps. */
		sched_ctx->sec_capabilities[i].crypto_capabilities =
				sched_ctx->sec_crypto_capabilities[i];
	}

	return 0;
}
31931439ee7SFan Zhang 
32031439ee7SFan Zhang static void
update_scheduler_feature_flag(struct rte_cryptodev * dev)32131439ee7SFan Zhang update_scheduler_feature_flag(struct rte_cryptodev *dev)
32231439ee7SFan Zhang {
32331439ee7SFan Zhang 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
32431439ee7SFan Zhang 	uint32_t i;
32531439ee7SFan Zhang 
32631439ee7SFan Zhang 	dev->feature_flags = 0;
32731439ee7SFan Zhang 
32885b00824SAdam Dybkowski 	for (i = 0; i < sched_ctx->nb_workers; i++) {
32931439ee7SFan Zhang 		struct rte_cryptodev_info dev_info;
33031439ee7SFan Zhang 
33185b00824SAdam Dybkowski 		rte_cryptodev_info_get(sched_ctx->workers[i].dev_id, &dev_info);
33231439ee7SFan Zhang 
33331439ee7SFan Zhang 		dev->feature_flags |= dev_info.feature_flags;
33431439ee7SFan Zhang 	}
33531439ee7SFan Zhang }
33631439ee7SFan Zhang 
33731439ee7SFan Zhang static void
update_max_nb_qp(struct scheduler_ctx * sched_ctx)33831439ee7SFan Zhang update_max_nb_qp(struct scheduler_ctx *sched_ctx)
33931439ee7SFan Zhang {
34031439ee7SFan Zhang 	uint32_t i;
34131439ee7SFan Zhang 	uint32_t max_nb_qp;
34231439ee7SFan Zhang 
34385b00824SAdam Dybkowski 	if (!sched_ctx->nb_workers)
34431439ee7SFan Zhang 		return;
34531439ee7SFan Zhang 
34685b00824SAdam Dybkowski 	max_nb_qp = sched_ctx->nb_workers ? UINT32_MAX : 0;
34731439ee7SFan Zhang 
34885b00824SAdam Dybkowski 	for (i = 0; i < sched_ctx->nb_workers; i++) {
34931439ee7SFan Zhang 		struct rte_cryptodev_info dev_info;
35031439ee7SFan Zhang 
35185b00824SAdam Dybkowski 		rte_cryptodev_info_get(sched_ctx->workers[i].dev_id, &dev_info);
35231439ee7SFan Zhang 		max_nb_qp = dev_info.max_nb_queue_pairs < max_nb_qp ?
35331439ee7SFan Zhang 				dev_info.max_nb_queue_pairs : max_nb_qp;
35431439ee7SFan Zhang 	}
35531439ee7SFan Zhang 
35631439ee7SFan Zhang 	sched_ctx->max_nb_queue_pairs = max_nb_qp;
35731439ee7SFan Zhang }
35831439ee7SFan Zhang 
35931439ee7SFan Zhang /** Attach a device to the scheduler. */
int
rte_cryptodev_scheduler_worker_attach(uint8_t scheduler_id, uint8_t worker_id)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;
	struct scheduler_worker *worker;
	struct rte_cryptodev_info dev_info;
	uint32_t i;

	/* scheduler_id must resolve to an existing cryptodev... */
	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	/* ...and that device must be a scheduler PMD instance. */
	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	/* Workers may only be attached while the scheduler is stopped. */
	if (dev->data->dev_started) {
		CR_SCHED_LOG(ERR, "Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;
	if (sched_ctx->nb_workers >=
			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS) {
		CR_SCHED_LOG(ERR, "Too many workers attached");
		return -ENOMEM;
	}

	/* Reject duplicate attachment of the same worker device. */
	for (i = 0; i < sched_ctx->nb_workers; i++)
		if (sched_ctx->workers[i].dev_id == worker_id) {
			CR_SCHED_LOG(ERR, "Worker already added");
			return -ENOTSUP;
		}

	worker = &sched_ctx->workers[sched_ctx->nb_workers];

	rte_cryptodev_info_get(worker_id, &dev_info);

	/* Record the worker, then try to rebuild the common capability
	 * set including it.
	 */
	worker->dev_id = worker_id;
	worker->driver_id = dev_info.driver_id;
	sched_ctx->nb_workers++;

	if (update_scheduler_capability(sched_ctx) < 0) {
		/* Roll back: free partial capability allocations and
		 * un-register the worker slot just filled in.
		 */
		scheduler_free_capabilities(sched_ctx);
		worker->dev_id = 0;
		worker->driver_id = 0;
		sched_ctx->nb_workers--;

		CR_SCHED_LOG(ERR, "capabilities update failed");
		return -ENOTSUP;
	}

	/* Refresh the aggregates that depend on the worker set. */
	update_scheduler_feature_flag(dev);

	update_max_nb_qp(sched_ctx);

	return 0;
}
42131439ee7SFan Zhang 
/** Detach a worker device from the scheduler. */
int
rte_cryptodev_scheduler_worker_detach(uint8_t scheduler_id, uint8_t worker_id)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;
	uint32_t i, worker_pos;

	/* scheduler_id must resolve to an existing cryptodev... */
	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	/* ...and that device must be a scheduler PMD instance. */
	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	/* Workers may only be detached while the scheduler is stopped. */
	if (dev->data->dev_started) {
		CR_SCHED_LOG(ERR, "Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;

	/* Locate the worker's slot in the workers array. */
	for (worker_pos = 0; worker_pos < sched_ctx->nb_workers; worker_pos++)
		if (sched_ctx->workers[worker_pos].dev_id == worker_id)
			break;
	if (worker_pos == sched_ctx->nb_workers) {
		CR_SCHED_LOG(ERR, "Cannot find worker");
		return -ENOTSUP;
	}

	/* Let the active scheduling mode drop its per-worker state first. */
	if (sched_ctx->ops.worker_detach(dev, worker_id) < 0) {
		CR_SCHED_LOG(ERR, "Failed to detach worker");
		return -ENOTSUP;
	}

	/* Compact the workers array over the removed slot and zero the
	 * now-unused last entry.
	 */
	for (i = worker_pos; i < sched_ctx->nb_workers - 1; i++) {
		memcpy(&sched_ctx->workers[i], &sched_ctx->workers[i+1],
				sizeof(struct scheduler_worker));
	}
	memset(&sched_ctx->workers[sched_ctx->nb_workers - 1], 0,
			sizeof(struct scheduler_worker));
	sched_ctx->nb_workers--;

	if (update_scheduler_capability(sched_ctx) < 0) {
		/* Free any partially rebuilt capability arrays; note the
		 * worker removal itself is not rolled back here.
		 */
		scheduler_free_capabilities(sched_ctx);
		CR_SCHED_LOG(ERR, "capabilities update failed");
		return -ENOTSUP;
	}

	/* Refresh the aggregates that depend on the worker set. */
	update_scheduler_feature_flag(dev);

	update_max_nb_qp(sched_ctx);

	return 0;
}
47931439ee7SFan Zhang 
48031439ee7SFan Zhang int
rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,enum rte_cryptodev_scheduler_mode mode)4813fb45fdbSFan Zhang rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
48231439ee7SFan Zhang 		enum rte_cryptodev_scheduler_mode mode)
48331439ee7SFan Zhang {
48431439ee7SFan Zhang 	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
48531439ee7SFan Zhang 	struct scheduler_ctx *sched_ctx;
48631439ee7SFan Zhang 
48731439ee7SFan Zhang 	if (!dev) {
48885aa6d34SHari Kumar 		CR_SCHED_LOG(ERR, "Operation not supported");
48931439ee7SFan Zhang 		return -ENOTSUP;
49031439ee7SFan Zhang 	}
49131439ee7SFan Zhang 
492520dd992SFerruh Yigit 	if (dev->driver_id != cryptodev_scheduler_driver_id) {
49385aa6d34SHari Kumar 		CR_SCHED_LOG(ERR, "Operation not supported");
49431439ee7SFan Zhang 		return -ENOTSUP;
49531439ee7SFan Zhang 	}
49631439ee7SFan Zhang 
49731439ee7SFan Zhang 	if (dev->data->dev_started) {
49885aa6d34SHari Kumar 		CR_SCHED_LOG(ERR, "Illegal operation");
49931439ee7SFan Zhang 		return -EBUSY;
50031439ee7SFan Zhang 	}
50131439ee7SFan Zhang 
50231439ee7SFan Zhang 	sched_ctx = dev->data->dev_private;
50331439ee7SFan Zhang 
50431439ee7SFan Zhang 	if (mode == sched_ctx->mode)
50531439ee7SFan Zhang 		return 0;
50631439ee7SFan Zhang 
50731439ee7SFan Zhang 	switch (mode) {
508100e4f7eSFan Zhang 	case CDEV_SCHED_MODE_ROUNDROBIN:
509100e4f7eSFan Zhang 		if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
510520dd992SFerruh Yigit 				crypto_scheduler_roundrobin) < 0) {
51185aa6d34SHari Kumar 			CR_SCHED_LOG(ERR, "Failed to load scheduler");
512100e4f7eSFan Zhang 			return -1;
513100e4f7eSFan Zhang 		}
514100e4f7eSFan Zhang 		break;
515a783aa63SFan Zhang 	case CDEV_SCHED_MODE_PKT_SIZE_DISTR:
516a783aa63SFan Zhang 		if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
517520dd992SFerruh Yigit 				crypto_scheduler_pkt_size_based_distr) < 0) {
51885aa6d34SHari Kumar 			CR_SCHED_LOG(ERR, "Failed to load scheduler");
519a783aa63SFan Zhang 			return -1;
520a783aa63SFan Zhang 		}
521a783aa63SFan Zhang 		break;
52237f075daSFan Zhang 	case CDEV_SCHED_MODE_FAILOVER:
52337f075daSFan Zhang 		if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
524520dd992SFerruh Yigit 				crypto_scheduler_failover) < 0) {
52585aa6d34SHari Kumar 			CR_SCHED_LOG(ERR, "Failed to load scheduler");
52637f075daSFan Zhang 			return -1;
52737f075daSFan Zhang 		}
52837f075daSFan Zhang 		break;
5294c07e055SKirill Rybalchenko 	case CDEV_SCHED_MODE_MULTICORE:
5304c07e055SKirill Rybalchenko 		if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
531520dd992SFerruh Yigit 				crypto_scheduler_multicore) < 0) {
53285aa6d34SHari Kumar 			CR_SCHED_LOG(ERR, "Failed to load scheduler");
5334c07e055SKirill Rybalchenko 			return -1;
5344c07e055SKirill Rybalchenko 		}
5354c07e055SKirill Rybalchenko 		break;
53631439ee7SFan Zhang 	default:
53785aa6d34SHari Kumar 		CR_SCHED_LOG(ERR, "Not yet supported");
53831439ee7SFan Zhang 		return -ENOTSUP;
53931439ee7SFan Zhang 	}
54031439ee7SFan Zhang 
54131439ee7SFan Zhang 	return 0;
54231439ee7SFan Zhang }
54331439ee7SFan Zhang 
54431439ee7SFan Zhang enum rte_cryptodev_scheduler_mode
rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id)5453fb45fdbSFan Zhang rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id)
54631439ee7SFan Zhang {
54731439ee7SFan Zhang 	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
54831439ee7SFan Zhang 	struct scheduler_ctx *sched_ctx;
54931439ee7SFan Zhang 
55031439ee7SFan Zhang 	if (!dev) {
55185aa6d34SHari Kumar 		CR_SCHED_LOG(ERR, "Operation not supported");
55231439ee7SFan Zhang 		return -ENOTSUP;
55331439ee7SFan Zhang 	}
55431439ee7SFan Zhang 
555520dd992SFerruh Yigit 	if (dev->driver_id != cryptodev_scheduler_driver_id) {
55685aa6d34SHari Kumar 		CR_SCHED_LOG(ERR, "Operation not supported");
55731439ee7SFan Zhang 		return -ENOTSUP;
55831439ee7SFan Zhang 	}
55931439ee7SFan Zhang 
56031439ee7SFan Zhang 	sched_ctx = dev->data->dev_private;
56131439ee7SFan Zhang 
56231439ee7SFan Zhang 	return sched_ctx->mode;
56331439ee7SFan Zhang }
56431439ee7SFan Zhang 
56531439ee7SFan Zhang int
rte_cryptodev_scheduler_ordering_set(uint8_t scheduler_id,uint32_t enable_reorder)56631439ee7SFan Zhang rte_cryptodev_scheduler_ordering_set(uint8_t scheduler_id,
56731439ee7SFan Zhang 		uint32_t enable_reorder)
56831439ee7SFan Zhang {
56931439ee7SFan Zhang 	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
57031439ee7SFan Zhang 	struct scheduler_ctx *sched_ctx;
57131439ee7SFan Zhang 
57231439ee7SFan Zhang 	if (!dev) {
57385aa6d34SHari Kumar 		CR_SCHED_LOG(ERR, "Operation not supported");
57431439ee7SFan Zhang 		return -ENOTSUP;
57531439ee7SFan Zhang 	}
57631439ee7SFan Zhang 
577520dd992SFerruh Yigit 	if (dev->driver_id != cryptodev_scheduler_driver_id) {
57885aa6d34SHari Kumar 		CR_SCHED_LOG(ERR, "Operation not supported");
57931439ee7SFan Zhang 		return -ENOTSUP;
58031439ee7SFan Zhang 	}
58131439ee7SFan Zhang 
58231439ee7SFan Zhang 	if (dev->data->dev_started) {
58385aa6d34SHari Kumar 		CR_SCHED_LOG(ERR, "Illegal operation");
58431439ee7SFan Zhang 		return -EBUSY;
58531439ee7SFan Zhang 	}
58631439ee7SFan Zhang 
58731439ee7SFan Zhang 	sched_ctx = dev->data->dev_private;
58831439ee7SFan Zhang 
58931439ee7SFan Zhang 	sched_ctx->reordering_enabled = enable_reorder;
59031439ee7SFan Zhang 
59131439ee7SFan Zhang 	return 0;
59231439ee7SFan Zhang }
59331439ee7SFan Zhang 
59431439ee7SFan Zhang int
rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id)59531439ee7SFan Zhang rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id)
59631439ee7SFan Zhang {
59731439ee7SFan Zhang 	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
59831439ee7SFan Zhang 	struct scheduler_ctx *sched_ctx;
59931439ee7SFan Zhang 
60031439ee7SFan Zhang 	if (!dev) {
60185aa6d34SHari Kumar 		CR_SCHED_LOG(ERR, "Operation not supported");
60231439ee7SFan Zhang 		return -ENOTSUP;
60331439ee7SFan Zhang 	}
60431439ee7SFan Zhang 
605520dd992SFerruh Yigit 	if (dev->driver_id != cryptodev_scheduler_driver_id) {
60685aa6d34SHari Kumar 		CR_SCHED_LOG(ERR, "Operation not supported");
60731439ee7SFan Zhang 		return -ENOTSUP;
60831439ee7SFan Zhang 	}
60931439ee7SFan Zhang 
61031439ee7SFan Zhang 	sched_ctx = dev->data->dev_private;
61131439ee7SFan Zhang 
61231439ee7SFan Zhang 	return (int)sched_ctx->reordering_enabled;
61331439ee7SFan Zhang }
61431439ee7SFan Zhang 
/** Load a user-defined scheduler into a scheduler PMD instance.
 *
 *  Copies the scheduler's name, description and operation table into the
 *  device's private context, replaces any previously created private
 *  context, and records the scheduling mode.  Must be called while the
 *  device is stopped.
 *
 * @param scheduler_id  cryptodev ID of the scheduler PMD instance.
 * @param scheduler     user scheduler definition to install; its name and
 *                      description strings are copied, its ops pointers are
 *                      copied by value (the scheduler struct itself need not
 *                      outlive this call — NOTE(review): the ops struct's
 *                      function pointers must, though; confirm with callers).
 * @return 0 on success; -ENOTSUP for an unknown ID or non-scheduler device;
 *         -EBUSY if the device is started; -EINVAL for over-long strings;
 *         a negative value propagated from create_private_ctx on failure.
 */
int
rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
		struct rte_cryptodev_scheduler *scheduler) {

	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	/* Refuse devices that belong to a different crypto driver. */
	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	/* Swapping schedulers under a running device is not supported. */
	if (dev->data->dev_started) {
		CR_SCHED_LOG(ERR, "Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;

	/* Validate string lengths before copying so a failure leaves the
	 * existing context untouched.
	 */
	if (strlen(scheduler->name) > RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
		CR_SCHED_LOG(ERR, "Invalid name %s, should be less than "
				"%u bytes.", scheduler->name,
				RTE_CRYPTODEV_NAME_MAX_LEN);
		return -EINVAL;
	}
	strlcpy(sched_ctx->name, scheduler->name, sizeof(sched_ctx->name));

	if (strlen(scheduler->description) >
			RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 1) {
		CR_SCHED_LOG(ERR, "Invalid description %s, should be less than "
				"%u bytes.", scheduler->description,
				RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 1);
		return -EINVAL;
	}
	strlcpy(sched_ctx->description, scheduler->description,
		sizeof(sched_ctx->description));

	/* load scheduler instance operations functions */
	sched_ctx->ops.config_queue_pair = scheduler->ops->config_queue_pair;
	sched_ctx->ops.create_private_ctx = scheduler->ops->create_private_ctx;
	sched_ctx->ops.scheduler_start = scheduler->ops->scheduler_start;
	sched_ctx->ops.scheduler_stop = scheduler->ops->scheduler_stop;
	sched_ctx->ops.worker_attach = scheduler->ops->worker_attach;
	sched_ctx->ops.worker_detach = scheduler->ops->worker_detach;
	sched_ctx->ops.option_set = scheduler->ops->option_set;
	sched_ctx->ops.option_get = scheduler->ops->option_get;

	/* Drop the previous scheduler's private context, if any, before the
	 * new scheduler builds its own.
	 */
	if (sched_ctx->private_ctx) {
		rte_free(sched_ctx->private_ctx);
		sched_ctx->private_ctx = NULL;
	}

	if (sched_ctx->ops.create_private_ctx) {
		int ret = (*sched_ctx->ops.create_private_ctx)(dev);

		if (ret < 0) {
			CR_SCHED_LOG(ERR, "Unable to create scheduler private "
					"context");
			return ret;
		}
	}

	sched_ctx->mode = scheduler->mode;

	return 0;
}
686029bb907SFan Zhang 
687029bb907SFan Zhang int
rte_cryptodev_scheduler_workers_get(uint8_t scheduler_id,uint8_t * workers)68885b00824SAdam Dybkowski rte_cryptodev_scheduler_workers_get(uint8_t scheduler_id, uint8_t *workers)
689029bb907SFan Zhang {
690029bb907SFan Zhang 	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
691029bb907SFan Zhang 	struct scheduler_ctx *sched_ctx;
69285b00824SAdam Dybkowski 	uint32_t nb_workers = 0;
693029bb907SFan Zhang 
694029bb907SFan Zhang 	if (!dev) {
69585aa6d34SHari Kumar 		CR_SCHED_LOG(ERR, "Operation not supported");
696029bb907SFan Zhang 		return -ENOTSUP;
697029bb907SFan Zhang 	}
698029bb907SFan Zhang 
699520dd992SFerruh Yigit 	if (dev->driver_id != cryptodev_scheduler_driver_id) {
70085aa6d34SHari Kumar 		CR_SCHED_LOG(ERR, "Operation not supported");
701029bb907SFan Zhang 		return -ENOTSUP;
702029bb907SFan Zhang 	}
703029bb907SFan Zhang 
704029bb907SFan Zhang 	sched_ctx = dev->data->dev_private;
705029bb907SFan Zhang 
70685b00824SAdam Dybkowski 	nb_workers = sched_ctx->nb_workers;
707029bb907SFan Zhang 
70885b00824SAdam Dybkowski 	if (workers && nb_workers) {
709029bb907SFan Zhang 		uint32_t i;
710029bb907SFan Zhang 
71185b00824SAdam Dybkowski 		for (i = 0; i < nb_workers; i++)
71285b00824SAdam Dybkowski 			workers[i] = sched_ctx->workers[i].dev_id;
713029bb907SFan Zhang 	}
714029bb907SFan Zhang 
71585b00824SAdam Dybkowski 	return (int)nb_workers;
716029bb907SFan Zhang }
7174e30ead5SFan Zhang 
7184e30ead5SFan Zhang int
rte_cryptodev_scheduler_option_set(uint8_t scheduler_id,enum rte_cryptodev_schedule_option_type option_type,void * option)7194e30ead5SFan Zhang rte_cryptodev_scheduler_option_set(uint8_t scheduler_id,
7204e30ead5SFan Zhang 		enum rte_cryptodev_schedule_option_type option_type,
7214e30ead5SFan Zhang 		void *option)
7224e30ead5SFan Zhang {
7234e30ead5SFan Zhang 	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
7244e30ead5SFan Zhang 	struct scheduler_ctx *sched_ctx;
7254e30ead5SFan Zhang 
7264e30ead5SFan Zhang 	if (option_type == CDEV_SCHED_OPTION_NOT_SET ||
7274e30ead5SFan Zhang 			option_type >= CDEV_SCHED_OPTION_COUNT) {
72885aa6d34SHari Kumar 		CR_SCHED_LOG(ERR, "Invalid option parameter");
7294e30ead5SFan Zhang 		return -EINVAL;
7304e30ead5SFan Zhang 	}
7314e30ead5SFan Zhang 
7324e30ead5SFan Zhang 	if (!option) {
73385aa6d34SHari Kumar 		CR_SCHED_LOG(ERR, "Invalid option parameter");
7344e30ead5SFan Zhang 		return -EINVAL;
7354e30ead5SFan Zhang 	}
7364e30ead5SFan Zhang 
7374e30ead5SFan Zhang 	if (dev->data->dev_started) {
73885aa6d34SHari Kumar 		CR_SCHED_LOG(ERR, "Illegal operation");
7394e30ead5SFan Zhang 		return -EBUSY;
7404e30ead5SFan Zhang 	}
7414e30ead5SFan Zhang 
7424e30ead5SFan Zhang 	sched_ctx = dev->data->dev_private;
7434e30ead5SFan Zhang 
7448f1d23ecSDavid Marchand 	if (*sched_ctx->ops.option_set == NULL)
7458f1d23ecSDavid Marchand 		return -ENOTSUP;
7464e30ead5SFan Zhang 
7474e30ead5SFan Zhang 	return (*sched_ctx->ops.option_set)(dev, option_type, option);
7484e30ead5SFan Zhang }
7494e30ead5SFan Zhang 
7504e30ead5SFan Zhang int
rte_cryptodev_scheduler_option_get(uint8_t scheduler_id,enum rte_cryptodev_schedule_option_type option_type,void * option)7514e30ead5SFan Zhang rte_cryptodev_scheduler_option_get(uint8_t scheduler_id,
7524e30ead5SFan Zhang 		enum rte_cryptodev_schedule_option_type option_type,
7534e30ead5SFan Zhang 		void *option)
7544e30ead5SFan Zhang {
7554e30ead5SFan Zhang 	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
7564e30ead5SFan Zhang 	struct scheduler_ctx *sched_ctx;
7574e30ead5SFan Zhang 
7584e30ead5SFan Zhang 	if (!dev) {
75985aa6d34SHari Kumar 		CR_SCHED_LOG(ERR, "Operation not supported");
7604e30ead5SFan Zhang 		return -ENOTSUP;
7614e30ead5SFan Zhang 	}
7624e30ead5SFan Zhang 
7634e30ead5SFan Zhang 	if (!option) {
76485aa6d34SHari Kumar 		CR_SCHED_LOG(ERR, "Invalid option parameter");
7654e30ead5SFan Zhang 		return -EINVAL;
7664e30ead5SFan Zhang 	}
7674e30ead5SFan Zhang 
768520dd992SFerruh Yigit 	if (dev->driver_id != cryptodev_scheduler_driver_id) {
76985aa6d34SHari Kumar 		CR_SCHED_LOG(ERR, "Operation not supported");
7704e30ead5SFan Zhang 		return -ENOTSUP;
7714e30ead5SFan Zhang 	}
7724e30ead5SFan Zhang 
7734e30ead5SFan Zhang 	sched_ctx = dev->data->dev_private;
7744e30ead5SFan Zhang 
7758f1d23ecSDavid Marchand 	if (*sched_ctx->ops.option_get == NULL)
7768f1d23ecSDavid Marchand 		return -ENOTSUP;
7774e30ead5SFan Zhang 
7784e30ead5SFan Zhang 	return (*sched_ctx->ops.option_get)(dev, option_type, option);
7794e30ead5SFan Zhang }
78085aa6d34SHari Kumar 
7819c99878aSJerin Jacob 
/* Register this PMD's log type with a default level of INFO. */
RTE_LOG_REGISTER_DEFAULT(scheduler_logtype_driver, INFO);
783