/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#ifndef _SCHEDULER_PMD_PRIVATE_H
#define _SCHEDULER_PMD_PRIVATE_H

#include <rte_security_driver.h>

#include "rte_cryptodev_scheduler.h"

#define CRYPTODEV_NAME_SCHEDULER_PMD	crypto_scheduler
/**< Scheduler Crypto PMD device name */

#define PER_WORKER_BUFF_SIZE			(256)

extern int scheduler_logtype_driver;

#define CR_SCHED_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, scheduler_logtype_driver,		\
			"%s() line %u: "fmt "\n", __func__, __LINE__, ##args)

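/** A worker crypto device and the queue pair the scheduler uses on it */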
struct scheduler_worker {
	uint8_t dev_id;
	uint16_t qp_id;
	uint32_t nb_inflight_cops;
	uint8_t driver_id;
};

struct scheduler_ctx {
	void *private_ctx;
	/**< private scheduler context pointer */

	struct rte_cryptodev_capabilities *capabilities;
	struct rte_security_capability *sec_capabilities;
	struct rte_cryptodev_capabilities **sec_crypto_capabilities;

	uint32_t max_nb_queue_pairs;

	struct scheduler_worker workers[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
	uint32_t nb_workers;
	/* reference count, updated as workers are attached and detached */
	uint32_t ref_cnt;

	enum rte_cryptodev_scheduler_mode mode;

	struct rte_cryptodev_scheduler_ops ops;

	uint8_t reordering_enabled;

	char name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
	char description[RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN];
	uint16_t wc_pool[RTE_MAX_LCORE];
	uint16_t nb_wc;

	char *init_worker_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
	int nb_init_workers;
} __rte_cache_aligned;

struct scheduler_qp_ctx {
	void *private_qp_ctx;

	uint32_t max_nb_objs;

	struct rte_ring *order_ring;
} __rte_cache_aligned;

struct scheduler_session_ctx {
	uint32_t ref_cnt;
	union {
		struct rte_cryptodev_sym_session *worker_sess[
			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
		struct rte_security_session *worker_sec_sess[
			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
	};
};

extern uint8_t cryptodev_scheduler_driver_id;

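/**
 * Cap nb_ops to the free space left in the order ring, so that every op
 * enqueued to a worker can also be inserted into the ring for reordering.
 */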
static __rte_always_inline uint16_t
get_max_enqueue_order_count(struct rte_ring *order_ring, uint16_t nb_ops)
{
	uint32_t count = rte_ring_free_count(order_ring);

	return count > nb_ops ? nb_ops : count;
}

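/** Append ops to the order ring in their original enqueue order */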
static __rte_always_inline void
scheduler_order_insert(struct rte_ring *order_ring,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	rte_ring_sp_enqueue_burst(order_ring, (void **)ops, nb_ops, NULL);
}

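/**
 * Drain ops from the order ring in their original order, stopping at the
 * first op not yet processed by its worker. Returns the number of ops
 * actually dequeued.
 */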
static __rte_always_inline uint16_t
scheduler_order_drain(struct rte_ring *order_ring,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rte_crypto_op *op;
	uint32_t nb_objs, nb_ops_to_deq;

	nb_objs = rte_ring_dequeue_burst_start(order_ring, (void **)ops,
		nb_ops, NULL);
	if (nb_objs == 0)
		return 0;

	for (nb_ops_to_deq = 0; nb_ops_to_deq != nb_objs; nb_ops_to_deq++) {
		op = ops[nb_ops_to_deq];
		if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
			break;
	}

	rte_ring_dequeue_finish(order_ring, nb_ops_to_deq);
	return nb_ops_to_deq;
}
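
/*
 * Illustrative enqueue/dequeue flow for the three ordering helpers above
 * (a sketch only; the worker_dev_id/worker_qp_id names are hypothetical):
 *
 *	nb_ops = get_max_enqueue_order_count(order_ring, nb_ops);
 *	nb_enq = rte_cryptodev_enqueue_burst(worker_dev_id, worker_qp_id,
 *			ops, nb_ops);
 *	scheduler_order_insert(order_ring, ops, nb_enq);
 *	...
 *	nb_deq = scheduler_order_drain(order_ring, ops, nb_ops);
 */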
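/**
 * Swap the scheduler's own session in the op for the corresponding worker's
 * session (symmetric or security, depending on the op's sess_type).
 */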
static __rte_always_inline void
scheduler_set_single_worker_session(struct rte_crypto_op *op,
		uint8_t worker_idx)
{
	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
		struct scheduler_session_ctx *sess_ctx =
				CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);
		op->sym->session = sess_ctx->worker_sess[worker_idx];
	} else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct scheduler_session_ctx *sess_ctx =
				SECURITY_GET_SESS_PRIV(op->sym->session);
		op->sym->session = sess_ctx->worker_sec_sess[worker_idx];
	}
}

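/**
 * Batched variant of scheduler_set_single_worker_session(): processes ops
 * four at a time, prefetching the next four sessions ahead of use.
 */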
static __rte_always_inline void
scheduler_set_worker_sessions(struct rte_crypto_op **ops, uint16_t nb_ops,
		uint8_t worker_index)
{
	struct rte_crypto_op **op = ops;
	uint16_t n = nb_ops;

	if (n >= 4) {
		rte_prefetch0(op[0]->sym->session);
		rte_prefetch0(op[1]->sym->session);
		rte_prefetch0(op[2]->sym->session);
		rte_prefetch0(op[3]->sym->session);
	}

	while (n >= 4) {
		if (n >= 8) {
			rte_prefetch0(op[4]->sym->session);
			rte_prefetch0(op[5]->sym->session);
			rte_prefetch0(op[6]->sym->session);
			rte_prefetch0(op[7]->sym->session);
		}

		scheduler_set_single_worker_session(op[0], worker_index);
		scheduler_set_single_worker_session(op[1], worker_index);
		scheduler_set_single_worker_session(op[2], worker_index);
		scheduler_set_single_worker_session(op[3], worker_index);

		op += 4;
		n -= 4;
	}

	while (n--) {
		scheduler_set_single_worker_session(op[0], worker_index);
		op++;
	}
}

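/**
 * Restore the scheduler's own session pointer in the op, previously stashed
 * in the worker session's opaque data.
 */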
static __rte_always_inline void
scheduler_retrieve_single_session(struct rte_crypto_op *op)
{
	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		op->sym->session = (void *)(uintptr_t)
			rte_cryptodev_sym_session_opaque_data_get(op->sym->session);
	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
		op->sym->session = (void *)(uintptr_t)
			rte_security_session_opaque_data_get(op->sym->session);
}

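/**
 * Batched variant of scheduler_retrieve_single_session(), with the same
 * four-at-a-time prefetch pattern as scheduler_set_worker_sessions().
 */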
static __rte_always_inline void
scheduler_retrieve_sessions(struct rte_crypto_op **ops, uint16_t nb_ops)
{
	uint16_t n = nb_ops;
	struct rte_crypto_op **op = ops;

	if (n >= 4) {
		rte_prefetch0(op[0]->sym->session);
		rte_prefetch0(op[1]->sym->session);
		rte_prefetch0(op[2]->sym->session);
		rte_prefetch0(op[3]->sym->session);
	}

	while (n >= 4) {
		if (n >= 8) {
			rte_prefetch0(op[4]->sym->session);
			rte_prefetch0(op[5]->sym->session);
			rte_prefetch0(op[6]->sym->session);
			rte_prefetch0(op[7]->sym->session);
		}

		scheduler_retrieve_single_session(op[0]);
		scheduler_retrieve_single_session(op[1]);
		scheduler_retrieve_single_session(op[2]);
		scheduler_retrieve_single_session(op[3]);

		op += 4;
		n -= 4;
	}

	while (n--) {
		scheduler_retrieve_single_session(op[0]);
		op++;
	}
}

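/**
 * Return the length of the job carried by the op: the cipher data length,
 * or the auth data length when no cipher data is present.
 */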
static __rte_always_inline uint32_t
scheduler_get_job_len(struct rte_crypto_op *op)
{
	uint32_t job_len;

	/* job_len is initialized to the cipher data length; if that is
	 * zero, the auth data length is used instead
	 */
	job_len = op->sym->cipher.data.length;
	job_len += (op->sym->cipher.data.length == 0) *
					op->sym->auth.data.length;

	return job_len;
}

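/**
 * Free all capability arrays owned by the scheduler context;
 * sec_crypto_capabilities is a NULL-terminated array of arrays.
 */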
static __rte_always_inline void
scheduler_free_capabilities(struct scheduler_ctx *sched_ctx)
{
	uint32_t i;

	rte_free(sched_ctx->capabilities);
	sched_ctx->capabilities = NULL;

	if (sched_ctx->sec_crypto_capabilities) {
		i = 0;
		while (sched_ctx->sec_crypto_capabilities[i] != NULL) {
			rte_free(sched_ctx->sec_crypto_capabilities[i]);
			sched_ctx->sec_crypto_capabilities[i] = NULL;
			i++;
		}

		rte_free(sched_ctx->sec_crypto_capabilities);
		sched_ctx->sec_crypto_capabilities = NULL;
	}

	rte_free(sched_ctx->sec_capabilities);
	sched_ctx->sec_capabilities = NULL;
}

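/**
 * Return 1 if the security action/protocol pair is supported by the
 * scheduler; currently only lookaside-protocol DOCSIS qualifies.
 */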
static __rte_always_inline int
scheduler_check_sec_proto_supp(enum rte_security_session_action_type action,
		enum rte_security_session_protocol protocol)
{
	if (action == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL &&
			protocol == RTE_SECURITY_PROTOCOL_DOCSIS)
		return 1;

	return 0;
}

/** device specific operations function pointer structure */
extern struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops;
extern struct rte_security_ops *rte_crypto_scheduler_pmd_sec_ops;

#endif /* _SCHEDULER_PMD_PRIVATE_H */