/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#ifndef _SCHEDULER_PMD_PRIVATE_H
#define _SCHEDULER_PMD_PRIVATE_H

#include <rte_security_driver.h>

#include "rte_cryptodev_scheduler.h"

#define CRYPTODEV_NAME_SCHEDULER_PMD	crypto_scheduler
/**< Scheduler Crypto PMD device name */

#define PER_WORKER_BUFF_SIZE			(256)

extern int scheduler_logtype_driver;
#define RTE_LOGTYPE_SCHEDULER_DRIVER scheduler_logtype_driver

#define CR_SCHED_LOG(level, ...) \
	RTE_LOG_LINE_PREFIX(level, SCHEDULER_DRIVER, "%s() line %u: ", \
		__func__ RTE_LOG_COMMA __LINE__, __VA_ARGS__)
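
/*
 * Usage sketch (hypothetical caller): CR_SCHED_LOG() prefixes the message
 * with the calling function and source line, e.g.
 *
 *	CR_SCHED_LOG(ERR, "not enough room for %u ops", nb_ops);
 *
 * called from foo() at line 123 with nb_ops == 8 would log
 * "foo() line 123: not enough room for 8 ops" on the SCHEDULER_DRIVER
 * logtype at ERR level.
 */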

struct scheduler_worker {
	uint8_t dev_id;
	uint16_t qp_id;
	uint32_t nb_inflight_cops;
	uint8_t driver_id;
};

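/* Device-level scheduler context: the attached worker set, the active
 * scheduling mode and its ops, aggregated capabilities, and reordering
 * state shared by all queue pairs.
 */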
struct __rte_cache_aligned scheduler_ctx {
	void *private_ctx;
	/**< private scheduler context pointer */

	struct rte_cryptodev_capabilities *capabilities;
	struct rte_security_capability *sec_capabilities;
	struct rte_cryptodev_capabilities **sec_crypto_capabilities;

	uint32_t max_nb_queue_pairs;

	struct scheduler_worker workers[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
	uint32_t nb_workers;
	/* reference count, updated as workers are added or removed */
	uint32_t ref_cnt;

	enum rte_cryptodev_scheduler_mode mode;

	struct rte_cryptodev_scheduler_ops ops;

	uint8_t reordering_enabled;

	char name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
	char description[RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN];
	uint16_t wc_pool[RTE_MAX_LCORE];
	uint16_t nb_wc;

	char *init_worker_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
	int nb_init_workers;
};

struct __rte_cache_aligned scheduler_qp_ctx {
	void *private_qp_ctx;

	uint32_t max_nb_objs;

	struct rte_ring *order_ring;
};

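/* Per-session context. Holds one worker session per attached worker
 * (crypto or security, matching how the front-end session was created)
 * and a reference count used to manage its lifetime.
 */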
struct scheduler_session_ctx {
	uint32_t ref_cnt;
	union {
		struct rte_cryptodev_sym_session *worker_sess[
			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
		struct rte_security_session *worker_sec_sess[
			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
	};
};

extern uint8_t cryptodev_scheduler_driver_id;

static __rte_always_inline uint16_t
get_max_enqueue_order_count(struct rte_ring *order_ring, uint16_t nb_ops)
{
	uint32_t count = rte_ring_free_count(order_ring);

	return count > nb_ops ? nb_ops : count;
}

static __rte_always_inline void
scheduler_order_insert(struct rte_ring *order_ring,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	rte_ring_sp_enqueue_burst(order_ring, (void **)ops, nb_ops, NULL);
}

static __rte_always_inline uint16_t
scheduler_order_drain(struct rte_ring *order_ring,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rte_crypto_op *op;
	uint32_t nb_objs, nb_ops_to_deq;

	nb_objs = rte_ring_dequeue_burst_start(order_ring, (void **)ops,
		nb_ops, NULL);
	if (nb_objs == 0)
		return 0;

	for (nb_ops_to_deq = 0; nb_ops_to_deq != nb_objs; nb_ops_to_deq++) {
		op = ops[nb_ops_to_deq];
		if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
			break;
	}

	rte_ring_dequeue_finish(order_ring, nb_ops_to_deq);
	return nb_ops_to_deq;
}
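
/*
 * Usage sketch for the ordering helpers above (hypothetical enqueue and
 * dequeue paths; the worker and qp_ctx names are illustrative, not part
 * of this header):
 *
 *	// enqueue: cap the burst so the order ring can track every op
 *	nb_ops = get_max_enqueue_order_count(qp_ctx->order_ring, nb_ops);
 *	nb_enq = rte_cryptodev_enqueue_burst(worker->dev_id, worker->qp_id,
 *			ops, nb_ops);
 *	scheduler_order_insert(qp_ctx->order_ring, ops, nb_enq);
 *
 *	// dequeue: release only the leading already-processed ops, so the
 *	// caller always sees completions in submission order
 *	return scheduler_order_drain(qp_ctx->order_ring, ops, nb_ops);
 */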

static __rte_always_inline void
scheduler_set_single_worker_session(struct rte_crypto_op *op,
		uint8_t worker_idx)
{
	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
		struct scheduler_session_ctx *sess_ctx =
				CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);
		op->sym->session = sess_ctx->worker_sess[worker_idx];
	} else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct scheduler_session_ctx *sess_ctx =
				SECURITY_GET_SESS_PRIV(op->sym->session);
		op->sym->session = sess_ctx->worker_sec_sess[worker_idx];
	}
}

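/*
 * Swap the front-end session on each op for the chosen worker's session
 * before the burst is passed to that worker. Sessions are prefetched four
 * ops ahead to hide lookup latency; the tail loop handles the remaining
 * nb_ops % 4 ops.
 */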
static __rte_always_inline void
scheduler_set_worker_sessions(struct rte_crypto_op **ops, uint16_t nb_ops,
		uint8_t worker_index)
{
	struct rte_crypto_op **op = ops;
	uint16_t n = nb_ops;

	if (n >= 4) {
		rte_prefetch0(op[0]->sym->session);
		rte_prefetch0(op[1]->sym->session);
		rte_prefetch0(op[2]->sym->session);
		rte_prefetch0(op[3]->sym->session);
	}

	while (n >= 4) {
		if (n >= 8) {
			rte_prefetch0(op[4]->sym->session);
			rte_prefetch0(op[5]->sym->session);
			rte_prefetch0(op[6]->sym->session);
			rte_prefetch0(op[7]->sym->session);
		}

		scheduler_set_single_worker_session(op[0], worker_index);
		scheduler_set_single_worker_session(op[1], worker_index);
		scheduler_set_single_worker_session(op[2], worker_index);
		scheduler_set_single_worker_session(op[3], worker_index);

		op += 4;
		n -= 4;
	}

	while (n--) {
		scheduler_set_single_worker_session(op[0], worker_index);
		op++;
	}
}

static __rte_always_inline void
scheduler_retrieve_single_session(struct rte_crypto_op *op)
{
	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		op->sym->session = (void *)(uintptr_t)
			rte_cryptodev_sym_session_opaque_data_get(op->sym->session);
	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
		op->sym->session = (void *)(uintptr_t)
			rte_security_session_opaque_data_get(op->sym->session);
}

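/*
 * Inverse of scheduler_set_worker_sessions(): on dequeue, restore each
 * op's original front-end session from the worker session's opaque data,
 * using the same four-ahead prefetch and unrolled loop pattern.
 */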
static __rte_always_inline void
scheduler_retrieve_sessions(struct rte_crypto_op **ops, uint16_t nb_ops)
{
	uint16_t n = nb_ops;
	struct rte_crypto_op **op = ops;

	if (n >= 4) {
		rte_prefetch0(op[0]->sym->session);
		rte_prefetch0(op[1]->sym->session);
		rte_prefetch0(op[2]->sym->session);
		rte_prefetch0(op[3]->sym->session);
	}

	while (n >= 4) {
		if (n >= 8) {
			rte_prefetch0(op[4]->sym->session);
			rte_prefetch0(op[5]->sym->session);
			rte_prefetch0(op[6]->sym->session);
			rte_prefetch0(op[7]->sym->session);
		}

		scheduler_retrieve_single_session(op[0]);
		scheduler_retrieve_single_session(op[1]);
		scheduler_retrieve_single_session(op[2]);
		scheduler_retrieve_single_session(op[3]);

		op += 4;
		n -= 4;
	}

	while (n--) {
		scheduler_retrieve_single_session(op[0]);
		op++;
	}
}

static __rte_always_inline uint32_t
scheduler_get_job_len(struct rte_crypto_op *op)
{
	uint32_t job_len;

	/* job_len is initialized to the cipher data length; if that
	 * is 0, the auth data length is used instead
	 */
	job_len = op->sym->cipher.data.length;
	job_len += (op->sym->cipher.data.length == 0) *
					op->sym->auth.data.length;

	return job_len;
}
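
/*
 * The multiply-by-boolean in scheduler_get_job_len() is a branchless
 * select: it is equivalent to
 *
 *	job_len = op->sym->cipher.data.length ?
 *		op->sym->cipher.data.length : op->sym->auth.data.length;
 *
 * but avoids a data-dependent branch on the hot path.
 */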

static __rte_always_inline void
scheduler_free_capabilities(struct scheduler_ctx *sched_ctx)
{
	uint32_t i;

	rte_free(sched_ctx->capabilities);
	sched_ctx->capabilities = NULL;

	if (sched_ctx->sec_crypto_capabilities) {
		i = 0;
		while (sched_ctx->sec_crypto_capabilities[i] != NULL) {
			rte_free(sched_ctx->sec_crypto_capabilities[i]);
			sched_ctx->sec_crypto_capabilities[i] = NULL;
			i++;
		}

		rte_free(sched_ctx->sec_crypto_capabilities);
		sched_ctx->sec_crypto_capabilities = NULL;
	}

	rte_free(sched_ctx->sec_capabilities);
	sched_ctx->sec_capabilities = NULL;
}

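/* The scheduler's security interface supports only lookaside-protocol
 * DOCSIS sessions; any other action/protocol combination is rejected.
 */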
static __rte_always_inline int
scheduler_check_sec_proto_supp(enum rte_security_session_action_type action,
		enum rte_security_session_protocol protocol)
{
	if (action == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL &&
			protocol == RTE_SECURITY_PROTOCOL_DOCSIS)
		return 1;

	return 0;
}

/** Device-specific operations function pointer structures */
extern struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops;
extern struct rte_security_ops *rte_crypto_scheduler_pmd_sec_ops;

#endif /* _SCHEDULER_PMD_PRIVATE_H */