/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */
#include <string.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_dev.h>
#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_reorder.h>

#include "scheduler_pmd_private.h"

/** Attach the workers predefined by the scheduler's EAL options */
static int
scheduler_attach_init_worker(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint8_t scheduler_id = dev->data->dev_id;
	int i;

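	/* Walk the list backwards: each successful attach frees the name
	 * and decrements nb_init_workers, so once the list is drained a
	 * subsequent call returns immediately.
	 */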
	for (i = sched_ctx->nb_init_workers - 1; i >= 0; i--) {
		const char *dev_name = sched_ctx->init_worker_names[i];
		struct rte_cryptodev *worker_dev =
				rte_cryptodev_pmd_get_named_dev(dev_name);
		int status;

		if (!worker_dev) {
			CR_SCHED_LOG(ERR, "Failed to locate worker dev %s",
					dev_name);
			return -EINVAL;
		}

		status = rte_cryptodev_scheduler_worker_attach(
				scheduler_id, worker_dev->data->dev_id);

		if (status < 0) {
			CR_SCHED_LOG(ERR, "Failed to attach worker cryptodev %u",
					worker_dev->data->dev_id);
			return status;
		}

		CR_SCHED_LOG(INFO, "Scheduler %s attached worker %s",
				dev->data->name,
				sched_ctx->init_worker_names[i]);

		rte_free(sched_ctx->init_worker_names[i]);
		sched_ctx->init_worker_names[i] = NULL;

		sched_ctx->nb_init_workers -= 1;
	}

	return 0;
}

/** Configure device */
static int
scheduler_pmd_config(struct rte_cryptodev *dev,
		struct rte_cryptodev_config *config)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	/* scheduler_attach_init_worker() is called from several entry
	 * points, but only the first call does meaningful work.
	 */
	ret = scheduler_attach_init_worker(dev);
	if (ret < 0)
		return ret;

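	/* Propagate the same configuration to every attached worker. */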
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;

		ret = rte_cryptodev_configure(worker_dev_id, config);
		if (ret < 0)
			break;
	}

	return ret;
}

static int
update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (sched_ctx->reordering_enabled) {
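		/* rte_ring sizes must be a power of two, so round up the
		 * combined per-worker buffer space.
		 */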
		char order_ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
		uint32_t buff_size = rte_align32pow2(
			sched_ctx->nb_workers * PER_WORKER_BUFF_SIZE);

		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}

		if (!buff_size)
			return 0;

		if (snprintf(order_ring_name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
			dev->data->dev_id, qp_id) < 0) {
			CR_SCHED_LOG(ERR, "failed to create unique reorder "
					"buffer name");
			return -ENOMEM;
		}

		qp_ctx->order_ring = rte_ring_create(order_ring_name,
				buff_size, rte_socket_id(),
				RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (!qp_ctx->order_ring) {
			CR_SCHED_LOG(ERR, "failed to create order ring");
			return -ENOMEM;
		}
	} else {
		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}
	}

	return 0;
}

126 
127 /** Start device */
128 static int
129 scheduler_pmd_start(struct rte_cryptodev *dev)
130 {
131 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
132 	uint32_t i;
133 	int ret;
134 
135 	if (dev->data->dev_started)
136 		return 0;
137 
138 	/* although scheduler_attach_init_worker presents multiple times,
139 	 * there will be only 1 meaningful execution.
140 	 */
	ret = scheduler_attach_init_worker(dev);
	if (ret < 0)
		return ret;

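	/* (Re)build the per-queue-pair reorder rings so they match the
	 * current reordering setting and worker count.
	 */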
	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = update_order_ring(dev, i);
		if (ret < 0) {
			CR_SCHED_LOG(ERR, "Failed to update reorder buffer");
			return ret;
		}
	}

	if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) {
		CR_SCHED_LOG(ERR, "Scheduler mode is not set");
		return -1;
	}

	if (!sched_ctx->nb_workers) {
		CR_SCHED_LOG(ERR, "No worker in the scheduler");
		return -1;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.worker_attach, -ENOTSUP);

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;

		if ((*sched_ctx->ops.worker_attach)(dev, worker_dev_id) < 0) {
			CR_SCHED_LOG(ERR, "Failed to attach worker");
			return -ENOTSUP;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.scheduler_start, -ENOTSUP);

	if ((*sched_ctx->ops.scheduler_start)(dev) < 0) {
		CR_SCHED_LOG(ERR, "Scheduler start failed");
		return -1;
	}

	/* start all workers */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		ret = rte_cryptodev_start(worker_dev_id);
		if (ret < 0) {
			CR_SCHED_LOG(ERR, "Failed to start worker dev %u",
					worker_dev_id);
			return ret;
		}
	}

	return 0;
}

/** Stop device */
static void
scheduler_pmd_stop(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	if (!dev->data->dev_started)
		return;

	/* stop all workers first */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;

		rte_cryptodev_stop(worker_dev_id);
	}

	if (*sched_ctx->ops.scheduler_stop)
		(*sched_ctx->ops.scheduler_stop)(dev);

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;

		if (*sched_ctx->ops.worker_detach)
			(*sched_ctx->ops.worker_detach)(dev, worker_dev_id);
	}
}

/** Close device */
static int
scheduler_pmd_close(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	/* the dev should be stopped before being closed */
	if (dev->data->dev_started)
		return -EBUSY;

	/* close all workers first */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev *worker_dev =
				rte_cryptodev_pmd_get_dev(worker_dev_id);

		ret = (*worker_dev->dev_ops->dev_close)(worker_dev);
		if (ret < 0)
			return ret;
	}

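	/* Release the per-queue-pair reorder rings and private contexts. */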
	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];

		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}

		if (qp_ctx->private_qp_ctx) {
			rte_free(qp_ctx->private_qp_ctx);
			qp_ctx->private_qp_ctx = NULL;
		}
	}

	if (sched_ctx->private_ctx) {
		rte_free(sched_ctx->private_ctx);
		sched_ctx->private_ctx = NULL;
	}

	if (sched_ctx->capabilities) {
		rte_free(sched_ctx->capabilities);
		sched_ctx->capabilities = NULL;
	}

	return 0;
}

/** Get device statistics */
static void
scheduler_pmd_stats_get(struct rte_cryptodev *dev,
	struct rte_cryptodev_stats *stats)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

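	/* Accumulate every worker's counters into *stats, which the
	 * cryptodev layer zeroes before invoking this op.
	 */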
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev *worker_dev =
				rte_cryptodev_pmd_get_dev(worker_dev_id);
		struct rte_cryptodev_stats worker_stats = {0};

		(*worker_dev->dev_ops->stats_get)(worker_dev, &worker_stats);

		stats->enqueued_count += worker_stats.enqueued_count;
		stats->dequeued_count += worker_stats.dequeued_count;

		stats->enqueue_err_count += worker_stats.enqueue_err_count;
		stats->dequeue_err_count += worker_stats.dequeue_err_count;
	}
}

/** Reset device statistics */
static void
scheduler_pmd_stats_reset(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev *worker_dev =
				rte_cryptodev_pmd_get_dev(worker_dev_id);

		(*worker_dev->dev_ops->stats_reset)(worker_dev);
	}
}

/** Get device info */
static void
scheduler_pmd_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t max_nb_sess = 0;
	uint16_t headroom_sz = 0;
	uint16_t tailroom_sz = 0;
	uint32_t i;

	if (!dev_info)
		return;

	/* scheduler_attach_init_worker() is called from several entry
	 * points, but only the first call does meaningful work.
	 */
	scheduler_attach_init_worker(dev);

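	/* Aggregate worker limits: report the smallest nonzero session
	 * limit and the largest mbuf head/tailroom requirement, i.e. the
	 * most restrictive values among all workers.
	 */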
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev_info worker_info;

		rte_cryptodev_info_get(worker_dev_id, &worker_info);
		uint32_t dev_max_sess = worker_info.sym.max_nb_sessions;
		if (dev_max_sess != 0) {
			if (max_nb_sess == 0 || dev_max_sess < max_nb_sess)
				max_nb_sess = worker_info.sym.max_nb_sessions;
		}

		/* Get the max headroom requirement among worker PMDs */
		headroom_sz = worker_info.min_mbuf_headroom_req >
				headroom_sz ?
				worker_info.min_mbuf_headroom_req :
				headroom_sz;

		/* Get the max tailroom requirement among worker PMDs */
		tailroom_sz = worker_info.min_mbuf_tailroom_req >
				tailroom_sz ?
				worker_info.min_mbuf_tailroom_req :
				tailroom_sz;
	}

	dev_info->driver_id = dev->driver_id;
	dev_info->feature_flags = dev->feature_flags;
	dev_info->capabilities = sched_ctx->capabilities;
	dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
	dev_info->min_mbuf_headroom_req = headroom_sz;
	dev_info->min_mbuf_tailroom_req = tailroom_sz;
	dev_info->sym.max_nb_sessions = max_nb_sess;
}

/** Release queue pair */
static int
scheduler_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (!qp_ctx)
		return 0;

	if (qp_ctx->order_ring)
		rte_ring_free(qp_ctx->order_ring);
	if (qp_ctx->private_qp_ctx)
		rte_free(qp_ctx->private_qp_ctx);

	rte_free(qp_ctx);
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
	const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx;
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
	uint32_t i;
	int ret;

	if (snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"CRYPTO_SCHE PMD %u QP %u",
			dev->data->dev_id, qp_id) < 0) {
		CR_SCHED_LOG(ERR, "Failed to create unique queue pair name");
		return -EFAULT;
	}

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		scheduler_pmd_qp_release(dev, qp_id);

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_id = sched_ctx->workers[i].dev_id;

		/*
		 * All workers will share the same session mempool
		 * for session-less operations, so the objects
		 * must be big enough for all the drivers used.
		 */
		ret = rte_cryptodev_queue_pair_setup(worker_id, qp_id,
				qp_conf, socket_id);
		if (ret < 0)
			return ret;
	}

	/* Allocate the queue pair data structure. */
	qp_ctx = rte_zmalloc_socket(name, sizeof(*qp_ctx), RTE_CACHE_LINE_SIZE,
			socket_id);
	if (qp_ctx == NULL)
		return -ENOMEM;

	/* The actual number of usable objects is nb_descriptors - 1 */
	qp_ctx->max_nb_objs = qp_conf->nb_descriptors - 1;

	dev->data->queue_pairs[qp_id] = qp_ctx;

	/* scheduler_attach_init_worker() is called from several entry
	 * points, but only the first call does meaningful work.
	 */
	ret = scheduler_attach_init_worker(dev);
	if (ret < 0) {
		CR_SCHED_LOG(ERR, "Failed to attach worker");
		scheduler_pmd_qp_release(dev, qp_id);
		return ret;
	}

	if (*sched_ctx->ops.config_queue_pair) {
		if ((*sched_ctx->ops.config_queue_pair)(dev, qp_id) < 0) {
			CR_SCHED_LOG(ERR, "Unable to configure queue pair");
			return -1;
		}
	}

	return 0;
}

static uint32_t
scheduler_pmd_sym_session_get_size(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint8_t i = 0;
	uint32_t max_priv_sess_size = 0;

	/* Find the largest private session size among all workers */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev *worker_dev =
				&rte_cryptodevs[worker_dev_id];
		uint32_t priv_sess_size =
			(*worker_dev->dev_ops->sym_session_get_size)(worker_dev);

		if (max_priv_sess_size < priv_sess_size)
			max_priv_sess_size = priv_sess_size;
	}

	return max_priv_sess_size;
}

static int
scheduler_pmd_sym_session_configure(struct rte_cryptodev *dev,
	struct rte_crypto_sym_xform *xform,
	struct rte_cryptodev_sym_session *sess,
	struct rte_mempool *mempool)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

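	/* Initialize the session on every worker, so whichever worker the
	 * scheduler later picks can process operations using it.
	 */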
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		struct scheduler_worker *worker = &sched_ctx->workers[i];

		ret = rte_cryptodev_sym_session_init(worker->dev_id, sess,
					xform, mempool);
		if (ret < 0) {
			CR_SCHED_LOG(ERR, "unable to configure sym session");
			return ret;
		}
	}

	return 0;
}

/** Clear the session memory so it doesn't leave key material behind */
static void
scheduler_pmd_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	/* Clear private data of workers */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		struct scheduler_worker *worker = &sched_ctx->workers[i];

		rte_cryptodev_sym_session_clear(worker->dev_id, sess);
	}
}

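/*
 * The op table below is what exposes the scheduler as a regular
 * cryptodev. Illustrative EAL usage (a sketch; the worker vdev names
 * are hypothetical examples of two already-created crypto PMDs):
 *
 *   --vdev "crypto_scheduler,worker=crypto_aesni_mb0,\
 *       worker=crypto_aesni_mb1,mode=round-robin,ordering=enable"
 */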
static struct rte_cryptodev_ops scheduler_pmd_ops = {
		.dev_configure		= scheduler_pmd_config,
		.dev_start		= scheduler_pmd_start,
		.dev_stop		= scheduler_pmd_stop,
		.dev_close		= scheduler_pmd_close,

		.stats_get		= scheduler_pmd_stats_get,
		.stats_reset		= scheduler_pmd_stats_reset,

		.dev_infos_get		= scheduler_pmd_info_get,

		.queue_pair_setup	= scheduler_pmd_qp_setup,
		.queue_pair_release	= scheduler_pmd_qp_release,

		.sym_session_get_size	= scheduler_pmd_sym_session_get_size,
		.sym_session_configure	= scheduler_pmd_sym_session_configure,
		.sym_session_clear	= scheduler_pmd_sym_session_clear,
};

struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops;