xref: /dpdk/drivers/crypto/scheduler/scheduler_pmd_ops.c (revision af0785a2447b307965377b62f46a5f39457a85a3)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Intel Corporation
3  */
4 #include <string.h>
5 
6 #include <rte_common.h>
7 #include <rte_malloc.h>
8 #include <dev_driver.h>
9 #include <rte_cryptodev.h>
10 #include <cryptodev_pmd.h>
11 #include <rte_reorder.h>
12 #include <rte_errno.h>
13 
14 #include "scheduler_pmd_private.h"
15 
16 /** Attach the workers predefined by the scheduler's EAL options */
17 static int
18 scheduler_attach_init_worker(struct rte_cryptodev *dev)
19 {
20 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
21 	uint8_t scheduler_id = dev->data->dev_id;
22 	int i;
23 
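	/*
	 * Worker names supplied through the scheduler's EAL options are
	 * consumed back to front: every successful attach frees the stored
	 * name and decrements nb_init_workers, so later calls find nothing
	 * left to do.
	 */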
24 	for (i = sched_ctx->nb_init_workers - 1; i >= 0; i--) {
25 		const char *dev_name = sched_ctx->init_worker_names[i];
26 		struct rte_cryptodev *worker_dev =
27 				rte_cryptodev_pmd_get_named_dev(dev_name);
28 		int status;
29 
30 		if (!worker_dev) {
31 			CR_SCHED_LOG(ERR, "Failed to locate worker dev %s",
32 					dev_name);
33 			return -EINVAL;
34 		}
35 
36 		status = rte_cryptodev_scheduler_worker_attach(
37 				scheduler_id, worker_dev->data->dev_id);
38 
39 		if (status < 0) {
40 			CR_SCHED_LOG(ERR, "Failed to attach worker cryptodev %u",
41 					worker_dev->data->dev_id);
42 			return status;
43 		}
44 
45 		CR_SCHED_LOG(INFO, "Scheduler %s attached worker %s",
46 				dev->data->name,
47 				sched_ctx->init_worker_names[i]);
48 
49 		rte_free(sched_ctx->init_worker_names[i]);
50 		sched_ctx->init_worker_names[i] = NULL;
51 
52 		sched_ctx->nb_init_workers -= 1;
53 	}
54 
55 	return 0;
56 }
57 /** Configure device */
58 static int
59 scheduler_pmd_config(struct rte_cryptodev *dev,
60 		struct rte_cryptodev_config *config)
61 {
62 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
63 	uint32_t i;
64 	int ret;
65 
66 	/* Although scheduler_attach_init_worker() is called from several
67 	 * entry points, only one invocation does meaningful work.
68 	 */
69 	ret = scheduler_attach_init_worker(dev);
70 	if (ret < 0)
71 		return ret;
72 
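	/*
	 * Propagate the same configuration to every attached worker and stop
	 * at the first failure, returning that worker's error code.
	 */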
73 	for (i = 0; i < sched_ctx->nb_workers; i++) {
74 		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
75 
76 		ret = rte_cryptodev_configure(worker_dev_id, config);
77 		if (ret < 0)
78 			break;
79 	}
80 
81 	return ret;
82 }
83 
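/** (Re)create the reorder ring of a queue pair when reordering is enabled,
 *  or free it when reordering is disabled.
 */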
84 static int
85 update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id)
86 {
87 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
88 	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
89 
90 	if (sched_ctx->reordering_enabled) {
91 		char order_ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
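		/*
		 * rte_ring sizes must be a power of two unless RING_F_EXACT_SZ
		 * is used, hence the rte_align32pow2() rounding below.
		 */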
92 		uint32_t buff_size = rte_align32pow2(
93 			sched_ctx->nb_workers * PER_WORKER_BUFF_SIZE);
94 
95 		if (qp_ctx->order_ring) {
96 			rte_ring_free(qp_ctx->order_ring);
97 			qp_ctx->order_ring = NULL;
98 		}
99 
100 		if (!buff_size)
101 			return 0;
102 
103 		if (snprintf(order_ring_name, RTE_CRYPTODEV_NAME_MAX_LEN,
104 			"%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
105 			dev->data->dev_id, qp_id) < 0) {
106 			CR_SCHED_LOG(ERR, "failed to create unique reorder buffer"
107 					" name");
108 			return -ENOMEM;
109 		}
110 
111 		qp_ctx->order_ring = rte_ring_create(order_ring_name,
112 				buff_size, rte_socket_id(),
113 				RING_F_SP_ENQ | RING_F_SC_DEQ);
114 		if (!qp_ctx->order_ring) {
115 			CR_SCHED_LOG(ERR, "failed to create order ring");
116 			return -ENOMEM;
117 		}
118 	} else {
119 		if (qp_ctx->order_ring) {
120 			rte_ring_free(qp_ctx->order_ring);
121 			qp_ctx->order_ring = NULL;
122 		}
123 	}
124 
125 	return 0;
126 }
127 
128 /** Start device */
129 static int
130 scheduler_pmd_start(struct rte_cryptodev *dev)
131 {
132 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
133 	uint32_t i;
134 	int ret;
135 
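	/*
	 * Start-up order: attach any workers named on the EAL command line,
	 * refresh the reorder rings, validate the scheduling mode and the
	 * worker list, bind the workers to the scheduler ops, start the
	 * scheduler itself and finally start every worker device.
	 */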
136 	if (dev->data->dev_started)
137 		return 0;
138 
139 	/* Although scheduler_attach_init_worker() is called from several
140 	 * entry points, only one invocation does meaningful work.
141 	 */
142 	ret = scheduler_attach_init_worker(dev);
143 	if (ret < 0)
144 		return ret;
145 
146 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
147 		ret = update_order_ring(dev, i);
148 		if (ret < 0) {
149 			CR_SCHED_LOG(ERR, "Failed to update reorder buffer");
150 			return ret;
151 		}
152 	}
153 
154 	if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) {
155 		CR_SCHED_LOG(ERR, "Scheduler mode is not set");
156 		return -1;
157 	}
158 
159 	if (!sched_ctx->nb_workers) {
160 		CR_SCHED_LOG(ERR, "No worker in the scheduler");
161 		return -1;
162 	}
163 
164 	if (*sched_ctx->ops.worker_attach == NULL)
165 		return -ENOTSUP;
166 
167 	for (i = 0; i < sched_ctx->nb_workers; i++) {
168 		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
169 
170 		if ((*sched_ctx->ops.worker_attach)(dev, worker_dev_id) < 0) {
171 			CR_SCHED_LOG(ERR, "Failed to attach worker");
172 			return -ENOTSUP;
173 		}
174 	}
175 
176 	if (*sched_ctx->ops.scheduler_start == NULL)
177 		return -ENOTSUP;
178 
179 	if ((*sched_ctx->ops.scheduler_start)(dev) < 0) {
180 		CR_SCHED_LOG(ERR, "Scheduler start failed");
181 		return -1;
182 	}
183 
184 	/* start all workers */
185 	for (i = 0; i < sched_ctx->nb_workers; i++) {
186 		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
187 		ret = rte_cryptodev_start(worker_dev_id);
188 		if (ret < 0) {
189 			CR_SCHED_LOG(ERR, "Failed to start worker dev %u",
190 					worker_dev_id);
191 			return ret;
192 		}
193 	}
194 
195 	return 0;
196 }
197 
198 /** Stop device */
199 static void
200 scheduler_pmd_stop(struct rte_cryptodev *dev)
201 {
202 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
203 	uint32_t i;
204 
205 	if (!dev->data->dev_started)
206 		return;
207 
208 	/* stop all workers first */
209 	for (i = 0; i < sched_ctx->nb_workers; i++) {
210 		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
211 
212 		rte_cryptodev_stop(worker_dev_id);
213 	}
214 
215 	if (*sched_ctx->ops.scheduler_stop)
216 		(*sched_ctx->ops.scheduler_stop)(dev);
217 
218 	for (i = 0; i < sched_ctx->nb_workers; i++) {
219 		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
220 
221 		if (*sched_ctx->ops.worker_detach)
222 			(*sched_ctx->ops.worker_detach)(dev, worker_dev_id);
223 	}
224 }
225 
226 /** Close device */
227 static int
228 scheduler_pmd_close(struct rte_cryptodev *dev)
229 {
230 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
231 	uint32_t i;
232 	int ret;
233 
234 	/* the dev should be stopped before being closed */
235 	if (dev->data->dev_started)
236 		return -EBUSY;
237 
238 	/* close all workers first */
239 	for (i = 0; i < sched_ctx->nb_workers; i++) {
240 		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
241 		struct rte_cryptodev *worker_dev =
242 				rte_cryptodev_pmd_get_dev(worker_dev_id);
243 
244 		ret = (*worker_dev->dev_ops->dev_close)(worker_dev);
245 		if (ret < 0)
246 			return ret;
247 	}
248 
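	/* Release the order rings and private contexts of all queue pairs. */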
249 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
250 		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
251 
252 		if (qp_ctx->order_ring) {
253 			rte_ring_free(qp_ctx->order_ring);
254 			qp_ctx->order_ring = NULL;
255 		}
256 
257 		if (qp_ctx->private_qp_ctx) {
258 			rte_free(qp_ctx->private_qp_ctx);
259 			qp_ctx->private_qp_ctx = NULL;
260 		}
261 	}
262 
263 	if (sched_ctx->private_ctx) {
264 		rte_free(sched_ctx->private_ctx);
265 		sched_ctx->private_ctx = NULL;
266 	}
267 
268 	if (sched_ctx->capabilities) {
269 		rte_free(sched_ctx->capabilities);
270 		sched_ctx->capabilities = NULL;
271 	}
272 
273 	return 0;
274 }
275 
276 /** Get device statistics */
277 static void
278 scheduler_pmd_stats_get(struct rte_cryptodev *dev,
279 	struct rte_cryptodev_stats *stats)
280 {
281 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
282 	uint32_t i;
283 
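	/*
	 * The scheduler keeps no counters of its own; the reported statistics
	 * are the sums of the per-worker counters.
	 */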
284 	for (i = 0; i < sched_ctx->nb_workers; i++) {
285 		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
286 		struct rte_cryptodev *worker_dev =
287 				rte_cryptodev_pmd_get_dev(worker_dev_id);
288 		struct rte_cryptodev_stats worker_stats = {0};
289 
290 		(*worker_dev->dev_ops->stats_get)(worker_dev, &worker_stats);
291 
292 		stats->enqueued_count += worker_stats.enqueued_count;
293 		stats->dequeued_count += worker_stats.dequeued_count;
294 
295 		stats->enqueue_err_count += worker_stats.enqueue_err_count;
296 		stats->dequeue_err_count += worker_stats.dequeue_err_count;
297 	}
298 }
299 
300 /** Reset device statistics */
301 static void
302 scheduler_pmd_stats_reset(struct rte_cryptodev *dev)
303 {
304 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
305 	uint32_t i;
306 
307 	for (i = 0; i < sched_ctx->nb_workers; i++) {
308 		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
309 		struct rte_cryptodev *worker_dev =
310 				rte_cryptodev_pmd_get_dev(worker_dev_id);
311 
312 		(*worker_dev->dev_ops->stats_reset)(worker_dev);
313 	}
314 }
315 
316 /** Get device info */
317 static void
318 scheduler_pmd_info_get(struct rte_cryptodev *dev,
319 		struct rte_cryptodev_info *dev_info)
320 {
321 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
322 	uint32_t max_nb_sess = 0;
323 	uint16_t headroom_sz = 0;
324 	uint16_t tailroom_sz = 0;
325 	uint32_t i;
326 
327 	if (!dev_info)
328 		return;
329 
330 	/* Although scheduler_attach_init_worker() is called from several
331 	 * entry points, only one invocation does meaningful work.
332 	 */
333 	scheduler_attach_init_worker(dev);
334 
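	/*
	 * Advertise the most restrictive limits across the workers: the
	 * smallest non-zero session limit and the largest mbuf headroom and
	 * tailroom requirements.
	 */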
335 	for (i = 0; i < sched_ctx->nb_workers; i++) {
336 		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
337 		struct rte_cryptodev_info worker_info;
338 
339 		rte_cryptodev_info_get(worker_dev_id, &worker_info);
340 		uint32_t dev_max_sess = worker_info.sym.max_nb_sessions;
341 		if (dev_max_sess != 0) {
342 			if (max_nb_sess == 0 || dev_max_sess < max_nb_sess)
343 				max_nb_sess = worker_info.sym.max_nb_sessions;
344 		}
345 
346 		/* Get the max headroom requirement among worker PMDs */
347 		headroom_sz = worker_info.min_mbuf_headroom_req >
348 				headroom_sz ?
349 				worker_info.min_mbuf_headroom_req :
350 				headroom_sz;
351 
352 		/* Get the max tailroom requirement among worker PMDs */
353 		tailroom_sz = worker_info.min_mbuf_tailroom_req >
354 				tailroom_sz ?
355 				worker_info.min_mbuf_tailroom_req :
356 				tailroom_sz;
357 	}
358 
359 	dev_info->driver_id = dev->driver_id;
360 	dev_info->feature_flags = dev->feature_flags;
361 	dev_info->capabilities = sched_ctx->capabilities;
362 	dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
363 	dev_info->min_mbuf_headroom_req = headroom_sz;
364 	dev_info->min_mbuf_tailroom_req = tailroom_sz;
365 	dev_info->sym.max_nb_sessions = max_nb_sess;
366 }
367 
368 /** Release queue pair */
369 static int
370 scheduler_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
371 {
372 	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
373 
374 	if (!qp_ctx)
375 		return 0;
376 
377 	rte_ring_free(qp_ctx->order_ring);
378 	rte_free(qp_ctx->private_qp_ctx);
379 
380 	rte_free(qp_ctx);
381 	dev->data->queue_pairs[qp_id] = NULL;
382 
383 	return 0;
384 }
385 
386 /** Setup a queue pair */
387 static int
388 scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
389 	const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
390 {
391 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
392 	struct scheduler_qp_ctx *qp_ctx;
393 	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
394 	uint32_t i;
395 	int ret;
396 
397 	if (snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
398 			"CRYPTO_SCHE PMD %u QP %u",
399 			dev->data->dev_id, qp_id) < 0) {
400 		CR_SCHED_LOG(ERR, "Failed to create unique queue pair name");
401 		return -EFAULT;
402 	}
403 
404 	/* Free memory prior to re-allocation if needed. */
405 	if (dev->data->queue_pairs[qp_id] != NULL)
406 		scheduler_pmd_qp_release(dev, qp_id);
407 
408 	for (i = 0; i < sched_ctx->nb_workers; i++) {
409 		uint8_t worker_id = sched_ctx->workers[i].dev_id;
410 
411 		/*
412 		 * All workers will share the same session mempool
413 		 * for session-less operations, so the objects
414 		 * must be big enough for all the drivers used.
415 		 */
416 		ret = rte_cryptodev_queue_pair_setup(worker_id, qp_id,
417 				qp_conf, socket_id);
418 		if (ret < 0)
419 			return ret;
420 	}
421 
422 	/* Allocate the queue pair data structure. */
423 	qp_ctx = rte_zmalloc_socket(name, sizeof(*qp_ctx), RTE_CACHE_LINE_SIZE,
424 			socket_id);
425 	if (qp_ctx == NULL)
426 		return -ENOMEM;
427 
428 	/* The actual available object number = nb_descriptors - 1 */
429 	qp_ctx->max_nb_objs = qp_conf->nb_descriptors - 1;
430 
431 	dev->data->queue_pairs[qp_id] = qp_ctx;
432 
433 	/* Although scheduler_attach_init_worker() is called from several
434 	 * entry points, only one invocation does meaningful work.
435 	 */
436 	ret = scheduler_attach_init_worker(dev);
437 	if (ret < 0) {
438 		CR_SCHED_LOG(ERR, "Failed to attach worker");
439 		scheduler_pmd_qp_release(dev, qp_id);
440 		return ret;
441 	}
442 
443 	if (*sched_ctx->ops.config_queue_pair) {
444 		if ((*sched_ctx->ops.config_queue_pair)(dev, qp_id) < 0) {
445 			CR_SCHED_LOG(ERR, "Unable to configure queue pair");
446 			return -1;
447 		}
448 	}
449 
450 	return 0;
451 }
452 
453 static uint32_t
454 scheduler_pmd_sym_session_get_size(struct rte_cryptodev *dev)
455 {
456 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
457 	uint8_t i = 0;
458 	uint32_t max_priv_sess_size = 0;
459 
460 	/* Find the maximum private session size across all workers */
461 	for (i = 0; i < sched_ctx->nb_workers; i++) {
462 		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
463 		struct rte_cryptodev *worker_dev = &rte_cryptodevs[worker_dev_id];
464 		uint32_t priv_sess_size = (*worker_dev->dev_ops->sym_session_get_size)(worker_dev);
465 
466 		if (max_priv_sess_size < priv_sess_size)
467 			max_priv_sess_size = priv_sess_size;
468 	}
469 
470 	return max_priv_sess_size;
471 }
472 
473 struct scheduler_configured_sess_info {
474 	uint8_t dev_id;
475 	uint8_t driver_id;
476 	struct rte_cryptodev_sym_session *sess;
477 };
478 
479 static int
480 scheduler_pmd_sym_session_configure(struct rte_cryptodev *dev,
481 	struct rte_crypto_sym_xform *xform,
482 	struct rte_cryptodev_sym_session *sess)
483 {
484 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
485 	struct rte_mempool *mp = rte_mempool_from_obj(sess);
486 	struct scheduler_session_ctx *sess_ctx = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
487 	struct scheduler_configured_sess_info configured_sess[
488 			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS] = {{0}};
489 	uint32_t i, j, n_configured_sess = 0;
490 	int ret = 0;
491 
492 	if (mp == NULL)
493 		return -EINVAL;
494 
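	/*
	 * Create one worker session per distinct driver; workers backed by
	 * the same driver reuse the session created for the first of them.
	 */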
495 	for (i = 0; i < sched_ctx->nb_workers; i++) {
496 		struct scheduler_worker *worker = &sched_ctx->workers[i];
497 		struct rte_cryptodev_sym_session *worker_sess;
498 		uint8_t next_worker = 0;
499 
500 		for (j = 0; j < n_configured_sess; j++) {
501 			if (configured_sess[j].driver_id ==
502 					worker->driver_id) {
503 				sess_ctx->worker_sess[i] =
504 					configured_sess[j].sess;
505 				next_worker = 1;
506 				break;
507 			}
508 		}
509 		if (next_worker)
510 			continue;
511 
512 		if (rte_mempool_avail_count(mp) == 0) {
513 			ret = -ENOMEM;
514 			goto error_exit;
515 		}
516 
517 		worker_sess = rte_cryptodev_sym_session_create(worker->dev_id,
518 			xform, mp);
519 		if (worker_sess == NULL) {
520 			ret = -rte_errno;
521 			goto error_exit;
522 		}
523 
524 		worker_sess->opaque_data = (uint64_t)sess;
525 		sess_ctx->worker_sess[i] = worker_sess;
526 		configured_sess[n_configured_sess].driver_id =
527 			worker->driver_id;
528 		configured_sess[n_configured_sess].dev_id = worker->dev_id;
529 		configured_sess[n_configured_sess].sess = worker_sess;
530 		n_configured_sess++;
531 	}
532 
533 	return 0;
534 error_exit:
535 	sess_ctx->ref_cnt = sched_ctx->ref_cnt;
536 	for (i = 0; i < n_configured_sess; i++)
537 		rte_cryptodev_sym_session_free(configured_sess[i].dev_id,
538 			configured_sess[i].sess);
539 	return ret;
540 }
541 
542 /** Clear the session memory so it doesn't leave key material behind */
543 static void
544 scheduler_pmd_sym_session_clear(struct rte_cryptodev *dev,
545 		struct rte_cryptodev_sym_session *sess)
546 {
547 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
548 	struct scheduler_session_ctx *sess_ctx = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
549 	struct scheduler_configured_sess_info deleted_sess[
550 			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS] = {{0}};
551 	uint32_t i, j, n_deleted_sess = 0;
552 
553 	if (sched_ctx->ref_cnt != sess_ctx->ref_cnt) {
554 		CR_SCHED_LOG(WARNING,
555 			"Worker updated between session creation/deletion. "
556 			"The session may not be freed fully.");
557 	}
558 
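	/*
	 * Free each worker session only once per driver; workers backed by
	 * the same driver share a session, so later occurrences are simply
	 * cleared.
	 */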
559 	for (i = 0; i < sched_ctx->nb_workers; i++) {
560 		struct scheduler_worker *worker = &sched_ctx->workers[i];
561 		uint8_t next_worker = 0;
562 
563 		for (j = 0; j < n_deleted_sess; j++) {
564 			if (deleted_sess[j].driver_id == worker->driver_id) {
565 				sess_ctx->worker_sess[i] = NULL;
566 				next_worker = 1;
567 				break;
568 			}
569 		}
570 		if (next_worker)
571 			continue;
572 
573 		rte_cryptodev_sym_session_free(worker->dev_id,
574 			sess_ctx->worker_sess[i]);
575 
576 		deleted_sess[n_deleted_sess++].driver_id = worker->driver_id;
577 		sess_ctx->worker_sess[i] = NULL;
578 	}
579 }
580 
581 static struct rte_cryptodev_ops scheduler_pmd_ops = {
582 		.dev_configure		= scheduler_pmd_config,
583 		.dev_start		= scheduler_pmd_start,
584 		.dev_stop		= scheduler_pmd_stop,
585 		.dev_close		= scheduler_pmd_close,
586 
587 		.stats_get		= scheduler_pmd_stats_get,
588 		.stats_reset		= scheduler_pmd_stats_reset,
589 
590 		.dev_infos_get		= scheduler_pmd_info_get,
591 
592 		.queue_pair_setup	= scheduler_pmd_qp_setup,
593 		.queue_pair_release	= scheduler_pmd_qp_release,
594 
595 		.sym_session_get_size	= scheduler_pmd_sym_session_get_size,
596 		.sym_session_configure	= scheduler_pmd_sym_session_configure,
597 		.sym_session_clear	= scheduler_pmd_sym_session_clear,
598 };
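
/*
 * These callbacks are reached through the public cryptodev API rather than
 * called directly. A minimal application-side sketch (sched_dev_id and
 * qp_conf are illustrative, not defined in this file):
 *
 *	struct rte_cryptodev_config cfg = {
 *		.socket_id = (int)rte_socket_id(),
 *		.nb_queue_pairs = 1,
 *	};
 *
 *	rte_cryptodev_configure(sched_dev_id, &cfg);      -> scheduler_pmd_config()
 *	rte_cryptodev_queue_pair_setup(sched_dev_id, 0,
 *			&qp_conf, rte_socket_id());       -> scheduler_pmd_qp_setup()
 *	rte_cryptodev_start(sched_dev_id);                -> scheduler_pmd_start()
 */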
599 
600 struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops;
601