/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#ifndef _SCHEDULER_PMD_PRIVATE_H
#define _SCHEDULER_PMD_PRIVATE_H

#include <rte_security_driver.h>

#include "rte_cryptodev_scheduler.h"

#define CRYPTODEV_NAME_SCHEDULER_PMD	crypto_scheduler
/**< Scheduler Crypto PMD device name */

#define PER_WORKER_BUFF_SIZE			(256)

extern int scheduler_logtype_driver;
#define RTE_LOGTYPE_SCHEDULER_DRIVER scheduler_logtype_driver

#define CR_SCHED_LOG(level, ...) \
	RTE_LOG_LINE_PREFIX(level, SCHEDULER_DRIVER, "%s() line %u: ", \
		__func__ RTE_LOG_COMMA __LINE__, __VA_ARGS__)

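/** A worker attached to the scheduler: one queue pair on a worker cryptodev */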
struct scheduler_worker {
	uint8_t dev_id;
	uint16_t qp_id;
	uint32_t nb_inflight_cops;
	uint8_t driver_id;
};

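/**
 * Scheduler device context, stored as the cryptodev's private data.
 * Holds the attached workers, the aggregated crypto/security capabilities,
 * the active scheduling mode and its ops, and the worker lcore pool
 * (wc_pool/nb_wc) used by the multi-core mode.
 */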
struct __rte_cache_aligned scheduler_ctx {
	void *private_ctx;
	/**< private scheduler context pointer */

	struct rte_cryptodev_capabilities *capabilities;
	struct rte_security_capability *sec_capabilities;
	struct rte_cryptodev_capabilities **sec_crypto_capabilities;

	uint32_t max_nb_queue_pairs;

	struct scheduler_worker workers[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
	uint32_t nb_workers;
	/* reference count, updated as workers are attached and detached */
	uint32_t ref_cnt;

	enum rte_cryptodev_scheduler_mode mode;

	struct rte_cryptodev_scheduler_ops ops;

	uint8_t reordering_enabled;

	char name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
	char description[RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN];
	uint16_t wc_pool[RTE_MAX_LCORE];
	uint16_t nb_wc;

	char *init_worker_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
	int nb_init_workers;
};

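/**
 * Per-queue-pair context. order_ring is used when crypto op reordering
 * is enabled, to preserve the original enqueue order across workers.
 */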
struct __rte_cache_aligned scheduler_qp_ctx {
	void *private_qp_ctx;

	uint32_t max_nb_objs;

	struct rte_ring *order_ring;
};

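/**
 * Scheduler session private data: one session handle per attached worker,
 * indexed by worker ID. An op carries either a crypto session or a
 * security session, hence the union.
 */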
struct scheduler_session_ctx {
	uint32_t ref_cnt;
	union {
		struct rte_cryptodev_sym_session *worker_sess[
			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
		struct rte_security_session *worker_sec_sess[
			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
	};
};

extern uint8_t cryptodev_scheduler_driver_id;

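/**
 * Return how many of nb_ops can be enqueued without overflowing the order
 * ring, i.e. the smaller of the ring's free count and nb_ops.
 */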
static __rte_always_inline uint16_t
get_max_enqueue_order_count(struct rte_ring *order_ring, uint16_t nb_ops)
{
	uint32_t count = rte_ring_free_count(order_ring);

	return count > nb_ops ? nb_ops : count;
}

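/**
 * Record a burst of ops in the order ring so scheduler_order_drain() can
 * later hand them back in enqueue order. A typical enqueue path looks
 * roughly like this (sketch only; enqueue_to_worker() is hypothetical):
 *
 *	nb_ops = get_max_enqueue_order_count(order_ring, nb_ops);
 *	nb_enq = enqueue_to_worker(qp, ops, nb_ops);
 *	scheduler_order_insert(order_ring, ops, nb_enq);
 */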
static __rte_always_inline void
scheduler_order_insert(struct rte_ring *order_ring,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	rte_ring_sp_enqueue_burst(order_ring, (void **)ops, nb_ops, NULL);
}

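/**
 * Dequeue completed ops from the order ring, stopping at the first op
 * still marked RTE_CRYPTO_OP_STATUS_NOT_PROCESSED, so ops are delivered
 * to the application in their original enqueue order.
 */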
static __rte_always_inline uint16_t
scheduler_order_drain(struct rte_ring *order_ring,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rte_crypto_op *op;
	uint32_t nb_objs, nb_ops_to_deq;

	nb_objs = rte_ring_dequeue_burst_start(order_ring, (void **)ops,
		nb_ops, NULL);
	if (nb_objs == 0)
		return 0;

	for (nb_ops_to_deq = 0; nb_ops_to_deq != nb_objs; nb_ops_to_deq++) {
		op = ops[nb_ops_to_deq];
		if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
			break;
	}

	rte_ring_dequeue_finish(order_ring, nb_ops_to_deq);
	return nb_ops_to_deq;
}

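/**
 * Swap the scheduler session attached to an op for the corresponding
 * worker session, so the op can be enqueued to that worker directly.
 */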
static __rte_always_inline void
scheduler_set_single_worker_session(struct rte_crypto_op *op,
		uint8_t worker_idx)
{
	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
		struct scheduler_session_ctx *sess_ctx =
				CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);
		op->sym->session = sess_ctx->worker_sess[worker_idx];
	} else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct scheduler_session_ctx *sess_ctx =
				SECURITY_GET_SESS_PRIV(op->sym->session);
		op->sym->session = sess_ctx->worker_sec_sess[worker_idx];
	}
}

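/**
 * Burst variant of scheduler_set_single_worker_session(), unrolled four
 * ops at a time with session prefetching to hide memory latency.
 */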
static __rte_always_inline void
scheduler_set_worker_sessions(struct rte_crypto_op **ops, uint16_t nb_ops,
		uint8_t worker_index)
{
	struct rte_crypto_op **op = ops;
	uint16_t n = nb_ops;

	if (n >= 4) {
		rte_prefetch0(op[0]->sym->session);
		rte_prefetch0(op[1]->sym->session);
		rte_prefetch0(op[2]->sym->session);
		rte_prefetch0(op[3]->sym->session);
	}

	while (n >= 4) {
		if (n >= 8) {
			rte_prefetch0(op[4]->sym->session);
			rte_prefetch0(op[5]->sym->session);
			rte_prefetch0(op[6]->sym->session);
			rte_prefetch0(op[7]->sym->session);
		}

		scheduler_set_single_worker_session(op[0], worker_index);
		scheduler_set_single_worker_session(op[1], worker_index);
		scheduler_set_single_worker_session(op[2], worker_index);
		scheduler_set_single_worker_session(op[3], worker_index);

		op += 4;
		n -= 4;
	}

	while (n--) {
		scheduler_set_single_worker_session(op[0], worker_index);
		op++;
	}
}

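/**
 * Restore an op's original scheduler session from the opaque data stored
 * in the worker session, undoing scheduler_set_single_worker_session().
 */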
static __rte_always_inline void
scheduler_retrieve_single_session(struct rte_crypto_op *op)
{
	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		op->sym->session = (void *)(uintptr_t)
			rte_cryptodev_sym_session_opaque_data_get(op->sym->session);
	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
		op->sym->session = (void *)(uintptr_t)
			rte_security_session_opaque_data_get(op->sym->session);
}

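/**
 * Burst variant of scheduler_retrieve_single_session(), unrolled four ops
 * at a time with session prefetching.
 */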
static __rte_always_inline void
scheduler_retrieve_sessions(struct rte_crypto_op **ops, uint16_t nb_ops)
{
	uint16_t n = nb_ops;
	struct rte_crypto_op **op = ops;

	if (n >= 4) {
		rte_prefetch0(op[0]->sym->session);
		rte_prefetch0(op[1]->sym->session);
		rte_prefetch0(op[2]->sym->session);
		rte_prefetch0(op[3]->sym->session);
	}

	while (n >= 4) {
		if (n >= 8) {
			rte_prefetch0(op[4]->sym->session);
			rte_prefetch0(op[5]->sym->session);
			rte_prefetch0(op[6]->sym->session);
			rte_prefetch0(op[7]->sym->session);
		}

		scheduler_retrieve_single_session(op[0]);
		scheduler_retrieve_single_session(op[1]);
		scheduler_retrieve_single_session(op[2]);
		scheduler_retrieve_single_session(op[3]);

		op += 4;
		n -= 4;
	}

	while (n--) {
		scheduler_retrieve_single_session(op[0]);
		op++;
	}
}

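/**
 * Return the op's job length: the cipher data length, or the auth data
 * length when there is no cipher data. Computed branch-free.
 */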
static __rte_always_inline uint32_t
scheduler_get_job_len(struct rte_crypto_op *op)
{
	uint32_t job_len;

	/* job_len is initialized to the cipher data length; if that
	 * is 0, it is set to the auth data length instead
	 */
	job_len = op->sym->cipher.data.length;
	job_len += (op->sym->cipher.data.length == 0) *
					op->sym->auth.data.length;

	return job_len;
}

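/**
 * Free every capability array owned by the scheduler context and clear
 * the pointers; sec_crypto_capabilities is a NULL-terminated array of
 * per-protocol capability arrays.
 */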
static __rte_always_inline void
scheduler_free_capabilities(struct scheduler_ctx *sched_ctx)
{
	uint32_t i;

	rte_free(sched_ctx->capabilities);
	sched_ctx->capabilities = NULL;

	if (sched_ctx->sec_crypto_capabilities) {
		i = 0;
		while (sched_ctx->sec_crypto_capabilities[i] != NULL) {
			rte_free(sched_ctx->sec_crypto_capabilities[i]);
			sched_ctx->sec_crypto_capabilities[i] = NULL;
			i++;
		}

		rte_free(sched_ctx->sec_crypto_capabilities);
		sched_ctx->sec_crypto_capabilities = NULL;
	}

	rte_free(sched_ctx->sec_capabilities);
	sched_ctx->sec_capabilities = NULL;
}

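/**
 * Return 1 if the scheduler supports the given security action/protocol
 * pair; currently only lookaside-protocol DOCSIS is supported.
 */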
static __rte_always_inline int
scheduler_check_sec_proto_supp(enum rte_security_session_action_type action,
		enum rte_security_session_protocol protocol)
{
	if (action == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL &&
			protocol == RTE_SECURITY_PROTOCOL_DOCSIS)
		return 1;

	return 0;
}

/** device-specific operations function pointer structures */
extern struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops;
extern struct rte_security_ops *rte_crypto_scheduler_pmd_sec_ops;

#endif /* _SCHEDULER_PMD_PRIVATE_H */