/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */
#include <string.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <dev_driver.h>
#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_security_driver.h>
#include <rte_reorder.h>
#include <rte_errno.h>

#include "scheduler_pmd_private.h"

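/*
 * Per-driver bookkeeping used while fanning a session out to the workers:
 * one entry is recorded per distinct worker driver, so that workers sharing
 * a driver ID reuse the same worker session instead of allocating duplicates.
 */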
struct scheduler_configured_sess_info {
	uint8_t dev_id;
	uint8_t driver_id;
	union {
		struct rte_cryptodev_sym_session *sess;
		struct {
			struct rte_security_session *sec_sess;
			struct rte_security_ctx *sec_ctx;
		};
	};
};

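/*
 * Create a matching worker session (symmetric crypto or security) on every
 * attached worker. Workers whose driver already has a session share it.
 * On any failure, all worker sessions created so far are torn down again.
 */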
static int
scheduler_session_create(void *sess, void *sess_params,
		struct scheduler_ctx *sched_ctx,
		enum rte_crypto_op_sess_type session_type)
{
	struct rte_mempool *mp = rte_mempool_from_obj(sess);
	struct scheduler_session_ctx *sess_ctx;
	struct scheduler_configured_sess_info configured_sess[
			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS] = {{0}};
	uint32_t i, j, n_configured_sess = 0;
	int ret = 0;

	if (session_type == RTE_CRYPTO_OP_WITH_SESSION)
		sess_ctx = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
	else
		sess_ctx = SECURITY_GET_SESS_PRIV(sess);

	if (mp == NULL)
		return -EINVAL;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		struct scheduler_worker *worker = &sched_ctx->workers[i];
		struct rte_cryptodev *dev = &rte_cryptodevs[worker->dev_id];
		uint8_t next_worker = 0;

		for (j = 0; j < n_configured_sess; j++) {
			if (configured_sess[j].driver_id == worker->driver_id) {
				if (session_type == RTE_CRYPTO_OP_WITH_SESSION)
					sess_ctx->worker_sess[i] =
						configured_sess[j].sess;
				else
					sess_ctx->worker_sec_sess[i] =
						configured_sess[j].sec_sess;

				next_worker = 1;
				break;
			}
		}
		if (next_worker)
			continue;

		if (rte_mempool_avail_count(mp) == 0) {
			ret = -ENOMEM;
			goto error_exit;
		}

		if (session_type == RTE_CRYPTO_OP_WITH_SESSION) {
			struct rte_cryptodev_sym_session *worker_sess =
				rte_cryptodev_sym_session_create(worker->dev_id,
						sess_params, mp);

			if (worker_sess == NULL) {
				ret = -rte_errno;
				goto error_exit;
			}

			worker_sess->opaque_data = (uint64_t)sess;
			sess_ctx->worker_sess[i] = worker_sess;
			configured_sess[n_configured_sess].sess = worker_sess;
		} else {
			struct rte_security_session *worker_sess =
				rte_security_session_create(dev->security_ctx,
						sess_params, mp);

			if (worker_sess == NULL) {
				ret = -rte_errno;
				goto error_exit;
			}

			worker_sess->opaque_data = (uint64_t)sess;
			sess_ctx->worker_sec_sess[i] = worker_sess;
			configured_sess[n_configured_sess].sec_sess =
							worker_sess;
			configured_sess[n_configured_sess].sec_ctx =
							dev->security_ctx;
		}

		configured_sess[n_configured_sess].driver_id =
							worker->driver_id;
		configured_sess[n_configured_sess].dev_id = worker->dev_id;
		n_configured_sess++;
	}

	return 0;

error_exit:
	sess_ctx->ref_cnt = sched_ctx->ref_cnt;
	for (i = 0; i < n_configured_sess; i++) {
		if (session_type == RTE_CRYPTO_OP_WITH_SESSION)
			rte_cryptodev_sym_session_free(
						configured_sess[i].dev_id,
						configured_sess[i].sess);
		else
			rte_security_session_destroy(
						configured_sess[i].sec_ctx,
						configured_sess[i].sec_sess);
	}

	return ret;
}

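/*
 * Tear down the worker sessions backing a scheduler session. Workers that
 * share a driver share one worker session, so it is freed only once.
 */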
static void
scheduler_session_destroy(void *sess, struct scheduler_ctx *sched_ctx,
		uint8_t session_type)
{
	struct scheduler_session_ctx *sess_ctx;
	struct scheduler_configured_sess_info deleted_sess[
			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS] = {{0}};
	uint32_t i, j, n_deleted_sess = 0;

	if (session_type == RTE_CRYPTO_OP_WITH_SESSION)
		sess_ctx = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
	else
		sess_ctx = SECURITY_GET_SESS_PRIV(sess);

	if (sched_ctx->ref_cnt != sess_ctx->ref_cnt) {
		CR_SCHED_LOG(WARNING,
			"Worker set changed between session creation and "
			"deletion. The session may not be fully freed.");
	}

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		struct scheduler_worker *worker = &sched_ctx->workers[i];
		struct rte_cryptodev *dev = &rte_cryptodevs[worker->dev_id];
		uint8_t next_worker = 0;

		for (j = 0; j < n_deleted_sess; j++) {
			if (deleted_sess[j].driver_id == worker->driver_id) {
				if (session_type == RTE_CRYPTO_OP_WITH_SESSION)
					sess_ctx->worker_sess[i] = NULL;
				else
					sess_ctx->worker_sec_sess[i] = NULL;

				next_worker = 1;
				break;
			}
		}
		if (next_worker)
			continue;

		if (session_type == RTE_CRYPTO_OP_WITH_SESSION) {
			rte_cryptodev_sym_session_free(worker->dev_id,
						sess_ctx->worker_sess[i]);
			sess_ctx->worker_sess[i] = NULL;
		} else {
			rte_security_session_destroy(dev->security_ctx,
						sess_ctx->worker_sec_sess[i]);
			sess_ctx->worker_sec_sess[i] = NULL;
		}

		deleted_sess[n_deleted_sess++].driver_id = worker->driver_id;
	}
}

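/*
 * Report the private session size the scheduler must advertise: the largest
 * private session size required by any attached worker, and never smaller
 * than the scheduler's own session context.
 */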
static unsigned int
scheduler_session_size_get(struct scheduler_ctx *sched_ctx,
		uint8_t session_type)
{
	uint8_t i = 0;
	uint32_t max_priv_sess_size = sizeof(struct scheduler_session_ctx);

	/* Find the maximum private session size across all workers */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev *dev = &rte_cryptodevs[worker_dev_id];
		struct rte_security_ctx *sec_ctx = dev->security_ctx;
		uint32_t priv_sess_size = 0;

		if (session_type == RTE_CRYPTO_OP_WITH_SESSION) {
			priv_sess_size =
				(*dev->dev_ops->sym_session_get_size)(dev);
		} else {
			priv_sess_size = (*sec_ctx->ops->session_get_size)(dev);
		}

		max_priv_sess_size = RTE_MAX(max_priv_sess_size, priv_sess_size);
	}

	return max_priv_sess_size;
}

/** Attach the workers predefined by the scheduler's EAL options */
static int
scheduler_attach_init_worker(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint8_t scheduler_id = dev->data->dev_id;
	int i;

	for (i = sched_ctx->nb_init_workers - 1; i >= 0; i--) {
		const char *dev_name = sched_ctx->init_worker_names[i];
		struct rte_cryptodev *worker_dev =
				rte_cryptodev_pmd_get_named_dev(dev_name);
		int status;

		if (!worker_dev) {
			CR_SCHED_LOG(ERR, "Failed to locate worker dev %s",
					dev_name);
			return -EINVAL;
		}

		status = rte_cryptodev_scheduler_worker_attach(
				scheduler_id, worker_dev->data->dev_id);

		if (status < 0) {
			CR_SCHED_LOG(ERR, "Failed to attach worker cryptodev %u",
					worker_dev->data->dev_id);
			return status;
		}

		CR_SCHED_LOG(INFO, "Scheduler %s attached worker %s",
				dev->data->name,
				sched_ctx->init_worker_names[i]);

		rte_free(sched_ctx->init_worker_names[i]);
		sched_ctx->init_worker_names[i] = NULL;

		sched_ctx->nb_init_workers -= 1;
	}

	return 0;
}

/** Configure device */
static int
scheduler_pmd_config(struct rte_cryptodev *dev,
		struct rte_cryptodev_config *config)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	/* scheduler_attach_init_worker() may be called multiple times, but
	 * only one invocation performs meaningful work.
	 */
	ret = scheduler_attach_init_worker(dev);
	if (ret < 0)
		return ret;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;

		ret = rte_cryptodev_configure(worker_dev_id, config);
		if (ret < 0)
			break;
	}

	return ret;
}

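/*
 * (Re)create or release the per-queue-pair reorder ring. When reordering is
 * enabled, the ring is sized to the next power of two above
 * nb_workers * PER_WORKER_BUFF_SIZE, so in-flight ops from every worker can
 * be buffered while they are put back into their original enqueue order.
 */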
static int
update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (sched_ctx->reordering_enabled) {
		char order_ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
		uint32_t buff_size = rte_align32pow2(
			sched_ctx->nb_workers * PER_WORKER_BUFF_SIZE);

		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}

		if (!buff_size)
			return 0;

		if (snprintf(order_ring_name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
			dev->data->dev_id, qp_id) < 0) {
			CR_SCHED_LOG(ERR, "failed to create unique reorder "
					"buffer name");
			return -ENOMEM;
		}

		qp_ctx->order_ring = rte_ring_create(order_ring_name,
				buff_size, rte_socket_id(),
				RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (!qp_ctx->order_ring) {
			CR_SCHED_LOG(ERR, "failed to create order ring");
			return -ENOMEM;
		}
	} else {
		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}
	}

	return 0;
}

/** Start device */
static int
scheduler_pmd_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	if (dev->data->dev_started)
		return 0;

	/* scheduler_attach_init_worker() may be called multiple times, but
	 * only one invocation performs meaningful work.
	 */
	ret = scheduler_attach_init_worker(dev);
	if (ret < 0)
		return ret;

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = update_order_ring(dev, i);
		if (ret < 0) {
			CR_SCHED_LOG(ERR, "Failed to update reorder buffer");
			return ret;
		}
	}

	if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) {
		CR_SCHED_LOG(ERR, "Scheduler mode is not set");
		return -1;
	}

	if (!sched_ctx->nb_workers) {
		CR_SCHED_LOG(ERR, "No worker in the scheduler");
		return -1;
	}

	if (*sched_ctx->ops.worker_attach == NULL)
		return -ENOTSUP;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;

		if ((*sched_ctx->ops.worker_attach)(dev, worker_dev_id) < 0) {
			CR_SCHED_LOG(ERR, "Failed to attach worker");
			return -ENOTSUP;
		}
	}

	if (*sched_ctx->ops.scheduler_start == NULL)
		return -ENOTSUP;

	if ((*sched_ctx->ops.scheduler_start)(dev) < 0) {
		CR_SCHED_LOG(ERR, "Scheduler start failed");
		return -1;
	}

	/* start all workers */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		ret = rte_cryptodev_start(worker_dev_id);
		if (ret < 0) {
			CR_SCHED_LOG(ERR, "Failed to start worker dev %u",
					worker_dev_id);
			return ret;
		}
	}

	return 0;
}

/** Stop device */
static void
scheduler_pmd_stop(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	if (!dev->data->dev_started)
		return;

	/* stop all workers first */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;

		rte_cryptodev_stop(worker_dev_id);
	}

	if (*sched_ctx->ops.scheduler_stop)
		(*sched_ctx->ops.scheduler_stop)(dev);

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;

		if (*sched_ctx->ops.worker_detach)
			(*sched_ctx->ops.worker_detach)(dev, worker_dev_id);
	}
}

/** Close device */
static int
scheduler_pmd_close(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	/* the dev should be stopped before being closed */
	if (dev->data->dev_started)
		return -EBUSY;

	/* close all workers first */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev *worker_dev =
				rte_cryptodev_pmd_get_dev(worker_dev_id);

		ret = (*worker_dev->dev_ops->dev_close)(worker_dev);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];

		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}

		if (qp_ctx->private_qp_ctx) {
			rte_free(qp_ctx->private_qp_ctx);
			qp_ctx->private_qp_ctx = NULL;
		}
	}

	if (sched_ctx->private_ctx) {
		rte_free(sched_ctx->private_ctx);
		sched_ctx->private_ctx = NULL;
	}

	scheduler_free_capabilities(sched_ctx);

	return 0;
}

/** Get device statistics */
static void
scheduler_pmd_stats_get(struct rte_cryptodev *dev,
	struct rte_cryptodev_stats *stats)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev *worker_dev =
				rte_cryptodev_pmd_get_dev(worker_dev_id);
		struct rte_cryptodev_stats worker_stats = {0};

		(*worker_dev->dev_ops->stats_get)(worker_dev, &worker_stats);

		stats->enqueued_count += worker_stats.enqueued_count;
		stats->dequeued_count += worker_stats.dequeued_count;

		stats->enqueue_err_count += worker_stats.enqueue_err_count;
		stats->dequeue_err_count += worker_stats.dequeue_err_count;
	}
}

/** Reset device statistics */
static void
scheduler_pmd_stats_reset(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev *worker_dev =
				rte_cryptodev_pmd_get_dev(worker_dev_id);

		(*worker_dev->dev_ops->stats_reset)(worker_dev);
	}
}

/** Get device info */
static void
scheduler_pmd_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t max_nb_sess = 0;
	uint16_t headroom_sz = 0;
	uint16_t tailroom_sz = 0;
	uint32_t i;

	if (!dev_info)
		return;

	/* scheduler_attach_init_worker() may be called multiple times, but
	 * only one invocation performs meaningful work.
	 */
	scheduler_attach_init_worker(dev);

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev_info worker_info;

		rte_cryptodev_info_get(worker_dev_id, &worker_info);
		uint32_t dev_max_sess = worker_info.sym.max_nb_sessions;
		if (dev_max_sess != 0) {
			if (max_nb_sess == 0 || dev_max_sess < max_nb_sess)
				max_nb_sess = worker_info.sym.max_nb_sessions;
		}

		/* Get the max headroom requirement among worker PMDs */
		headroom_sz = RTE_MAX(headroom_sz,
				worker_info.min_mbuf_headroom_req);

		/* Get the max tailroom requirement among worker PMDs */
		tailroom_sz = RTE_MAX(tailroom_sz,
				worker_info.min_mbuf_tailroom_req);
	}

	dev_info->driver_id = dev->driver_id;
	dev_info->feature_flags = dev->feature_flags;
	dev_info->capabilities = sched_ctx->capabilities;
	dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
	dev_info->min_mbuf_headroom_req = headroom_sz;
	dev_info->min_mbuf_tailroom_req = tailroom_sz;
	dev_info->sym.max_nb_sessions = max_nb_sess;
}

/** Release queue pair */
static int
scheduler_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (!qp_ctx)
		return 0;

	rte_ring_free(qp_ctx->order_ring);
	rte_free(qp_ctx->private_qp_ctx);

	rte_free(qp_ctx);
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

/** Set up a queue pair */
static int
scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
	const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx;
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
	uint32_t i;
	int ret;

	if (snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"CRYPTO_SCHE PMD %u QP %u",
			dev->data->dev_id, qp_id) < 0) {
		CR_SCHED_LOG(ERR, "Failed to create unique queue pair name");
		return -EFAULT;
	}

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		scheduler_pmd_qp_release(dev, qp_id);

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_id = sched_ctx->workers[i].dev_id;

		/*
		 * All workers will share the same session mempool
		 * for session-less operations, so the objects
		 * must be big enough for all the drivers used.
		 */
		ret = rte_cryptodev_queue_pair_setup(worker_id, qp_id,
				qp_conf, socket_id);
		if (ret < 0)
			return ret;
	}

	/* Allocate the queue pair data structure. */
	qp_ctx = rte_zmalloc_socket(name, sizeof(*qp_ctx), RTE_CACHE_LINE_SIZE,
			socket_id);
	if (qp_ctx == NULL)
		return -ENOMEM;

	/* The actual number of available objects = nb_descriptors - 1 */
	qp_ctx->max_nb_objs = qp_conf->nb_descriptors - 1;

	dev->data->queue_pairs[qp_id] = qp_ctx;

	/* scheduler_attach_init_worker() may be called multiple times, but
	 * only one invocation performs meaningful work.
	 */
	ret = scheduler_attach_init_worker(dev);
	if (ret < 0) {
		CR_SCHED_LOG(ERR, "Failed to attach worker");
		scheduler_pmd_qp_release(dev, qp_id);
		return ret;
	}

	if (*sched_ctx->ops.config_queue_pair) {
		if ((*sched_ctx->ops.config_queue_pair)(dev, qp_id) < 0) {
			CR_SCHED_LOG(ERR, "Unable to configure queue pair");
			return -1;
		}
	}

	return 0;
}

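/** Get the symmetric session private size (worst case over all workers) */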
static uint32_t
scheduler_pmd_sym_session_get_size(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;

	return scheduler_session_size_get(sched_ctx, RTE_CRYPTO_OP_WITH_SESSION);
}

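/** Configure a scheduler session from a symmetric crypto xform chain */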
static int
scheduler_pmd_sym_session_configure(struct rte_cryptodev *dev,
	struct rte_crypto_sym_xform *xform,
	struct rte_cryptodev_sym_session *sess)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;

	return scheduler_session_create(sess, xform, sched_ctx, RTE_CRYPTO_OP_WITH_SESSION);
}

/** Clear the session memory so no key material is left behind */
static void
scheduler_pmd_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;

	scheduler_session_destroy(sess, sched_ctx, RTE_CRYPTO_OP_WITH_SESSION);
}

static struct rte_cryptodev_ops scheduler_pmd_ops = {
		.dev_configure		= scheduler_pmd_config,
		.dev_start		= scheduler_pmd_start,
		.dev_stop		= scheduler_pmd_stop,
		.dev_close		= scheduler_pmd_close,

		.stats_get		= scheduler_pmd_stats_get,
		.stats_reset		= scheduler_pmd_stats_reset,

		.dev_infos_get		= scheduler_pmd_info_get,

		.queue_pair_setup	= scheduler_pmd_qp_setup,
		.queue_pair_release	= scheduler_pmd_qp_release,

		.sym_session_get_size	= scheduler_pmd_sym_session_get_size,
		.sym_session_configure	= scheduler_pmd_sym_session_configure,
		.sym_session_clear	= scheduler_pmd_sym_session_clear,
};

struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops;

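/*
 * For reference, a minimal application-side sketch of how these ops are
 * reached (the vdev name "crypto_scheduler" and worker_id are hypothetical;
 * error handling omitted):
 *
 *	int sid = rte_cryptodev_get_dev_id("crypto_scheduler");
 *
 *	rte_cryptodev_scheduler_worker_attach(sid, worker_id);
 *	rte_cryptodev_scheduler_mode_set(sid, CDEV_SCHED_MODE_ROUNDROBIN);
 *
 *	rte_cryptodev_configure(sid, &conf);	// fans out to all workers
 *	rte_cryptodev_queue_pair_setup(sid, 0, &qp_conf, rte_socket_id());
 *	rte_cryptodev_start(sid);	// attaches workers to the scheduler,
 *					// starts the scheduler, then workers
 */
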
/** Configure a scheduler session from a security session configuration */
static int
scheduler_pmd_sec_sess_create(void *dev, struct rte_security_session_conf *conf,
			struct rte_security_session *sess)
{
	struct rte_cryptodev *cdev = dev;
	struct scheduler_ctx *sched_ctx = cdev->data->dev_private;

	/* Check for supported security protocols */
	if (!scheduler_check_sec_proto_supp(conf->action_type, conf->protocol)) {
		CR_SCHED_LOG(ERR, "Unsupported security protocol");
		return -ENOTSUP;
	}

	return scheduler_session_create(sess, conf, sched_ctx, RTE_CRYPTO_OP_SECURITY_SESSION);
}

/** Clear the session memory so no key material is left behind */
static int
scheduler_pmd_sec_sess_destroy(void *dev,
			       struct rte_security_session *sess)
{
	struct rte_cryptodev *cdev = dev;
	struct scheduler_ctx *sched_ctx = cdev->data->dev_private;

	scheduler_session_destroy(sess, sched_ctx, RTE_CRYPTO_OP_SECURITY_SESSION);

	return 0;
}

/** Get the security capabilities of the scheduler PMD */
static const struct rte_security_capability *
scheduler_pmd_sec_capa_get(void *dev)
{
	struct rte_cryptodev *cdev = dev;
	struct scheduler_ctx *sched_ctx = cdev->data->dev_private;

	return sched_ctx->sec_capabilities;
}

static unsigned int
scheduler_pmd_sec_sess_size_get(void *dev)
{
	struct rte_cryptodev *cdev = dev;
	struct scheduler_ctx *sched_ctx = cdev->data->dev_private;

	return scheduler_session_size_get(sched_ctx,
				RTE_CRYPTO_OP_SECURITY_SESSION);
}

static struct rte_security_ops scheduler_pmd_sec_ops = {
		.session_create = scheduler_pmd_sec_sess_create,
		.session_update = NULL,
		.session_get_size = scheduler_pmd_sec_sess_size_get,
		.session_stats_get = NULL,
		.session_destroy = scheduler_pmd_sec_sess_destroy,
		.set_pkt_metadata = NULL,
		.capabilities_get = scheduler_pmd_sec_capa_get
};

struct rte_security_ops *rte_crypto_scheduler_pmd_sec_ops =
							&scheduler_pmd_sec_ops;
755