/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */
#include <unistd.h>

#include <rte_cryptodev.h>
#include <rte_malloc.h>

#include "rte_cryptodev_scheduler_operations.h"
#include "scheduler_pmd_private.h"

#define MC_SCHED_ENQ_RING_NAME_PREFIX	"MCS_ENQR_"
#define MC_SCHED_DEQ_RING_NAME_PREFIX	"MCS_DEQR_"

#define MC_SCHED_BUFFER_SIZE 32

#define CRYPTO_OP_STATUS_BIT_COMPLETE	0x80

/** multi-core scheduler context */
struct mc_scheduler_ctx {
	uint32_t num_workers;             /**< Number of workers polling */
	uint32_t stop_signal;

	struct rte_ring *sched_enq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
	struct rte_ring *sched_deq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
};

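/** multi-core scheduler per queue pair context */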
struct mc_scheduler_qp_ctx {
	struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
	uint32_t nb_slaves;

	uint32_t last_enq_worker_idx;
	uint32_t last_deq_worker_idx;

	struct mc_scheduler_ctx *mc_private_ctx;
};

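/**
 * Distribute a burst of crypto ops round-robin across the workers'
 * software enqueue rings, starting from the worker after the one used
 * for the previous burst. Returns the number of ops queued.
 */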
static uint16_t
schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct mc_scheduler_qp_ctx *mc_qp_ctx =
			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
	struct mc_scheduler_ctx *mc_ctx = mc_qp_ctx->mc_private_ctx;
	uint32_t worker_idx = mc_qp_ctx->last_enq_worker_idx;
	uint16_t i, processed_ops = 0;

	if (unlikely(nb_ops == 0))
		return 0;

	for (i = 0; i < mc_ctx->num_workers && nb_ops != 0; i++) {
		struct rte_ring *enq_ring = mc_ctx->sched_enq_ring[worker_idx];
		uint16_t nb_queue_ops = rte_ring_enqueue_burst(enq_ring,
			(void *)(&ops[processed_ops]), nb_ops, NULL);

		nb_ops -= nb_queue_ops;
		processed_ops += nb_queue_ops;

		if (++worker_idx == mc_ctx->num_workers)
			worker_idx = 0;
	}
	mc_qp_ctx->last_enq_worker_idx = worker_idx;

	return processed_ops;
}

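/**
 * Ordering-aware enqueue: limit the burst to what the order ring can
 * still track, enqueue the ops to the workers, then record the enqueued
 * ops in the order ring so they can later be returned in their original
 * order.
 */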
static uint16_t
schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct rte_ring *order_ring =
			((struct scheduler_qp_ctx *)qp)->order_ring;
	uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
			nb_ops);
	uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
			nb_ops_to_enq);

	scheduler_order_insert(order_ring, ops, nb_ops_enqd);

	return nb_ops_enqd;
}

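/**
 * Collect processed crypto ops round-robin from the workers' software
 * dequeue rings, starting from the worker after the one drained for the
 * previous burst. Returns the number of ops dequeued.
 */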
static uint16_t
schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct mc_scheduler_qp_ctx *mc_qp_ctx =
			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
	struct mc_scheduler_ctx *mc_ctx = mc_qp_ctx->mc_private_ctx;
	uint32_t worker_idx = mc_qp_ctx->last_deq_worker_idx;
	uint16_t i, processed_ops = 0;

	for (i = 0; i < mc_ctx->num_workers && nb_ops != 0; i++) {
		struct rte_ring *deq_ring = mc_ctx->sched_deq_ring[worker_idx];
		uint16_t nb_deq_ops = rte_ring_dequeue_burst(deq_ring,
			(void *)(&ops[processed_ops]), nb_ops, NULL);

		nb_ops -= nb_deq_ops;
		processed_ops += nb_deq_ops;
		if (++worker_idx == mc_ctx->num_workers)
			worker_idx = 0;
	}

	mc_qp_ctx->last_deq_worker_idx = worker_idx;

	return processed_ops;
}

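/**
 * Ordering-aware dequeue: walk the order ring in enqueue order and stop
 * at the first op whose COMPLETE status bit is not yet set, then bulk
 * dequeue the completed prefix so ops are returned in their original
 * order.
 */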
static uint16_t
schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct rte_ring *order_ring = ((struct scheduler_qp_ctx *)qp)->order_ring;
	struct rte_crypto_op *op;
	uint32_t nb_objs = rte_ring_count(order_ring);
	uint32_t nb_ops_to_deq = 0;
	uint32_t nb_ops_deqd = 0;

	if (nb_objs > nb_ops)
		nb_objs = nb_ops;

	while (nb_ops_to_deq < nb_objs) {
		SCHEDULER_GET_RING_OBJ(order_ring, nb_ops_to_deq, op);

		if (!(op->status & CRYPTO_OP_STATUS_BIT_COMPLETE))
			break;

		op->status &= ~CRYPTO_OP_STATUS_BIT_COMPLETE;
		nb_ops_to_deq++;
	}

	if (nb_ops_to_deq) {
		nb_ops_deqd = rte_ring_sc_dequeue_bulk(order_ring,
				(void **)ops, nb_ops_to_deq, NULL);
	}

	return nb_ops_deqd;
}

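/** Multi-core mode keeps no per-slave state, so attach/detach are no-ops */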
static int
slave_attach(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint8_t slave_id)
{
	return 0;
}

static int
slave_detach(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint8_t slave_id)
{
	return 0;
}

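/**
 * Main loop run on each worker lcore: pull op bursts from this worker's
 * enqueue ring and submit them to the attached slave device (finishing
 * any pending ops from the previous iteration first), then dequeue
 * completed ops from the slave and either mark them with the COMPLETE
 * status bit (reordering mode) or push them onto this worker's dequeue
 * ring. Runs until the stop signal is set.
 */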
static int
mc_scheduler_worker(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
	struct rte_ring *enq_ring;
	struct rte_ring *deq_ring;
	uint32_t core_id = rte_lcore_id();
	int i, worker_idx = -1;
	struct scheduler_slave *slave;
	struct rte_crypto_op *enq_ops[MC_SCHED_BUFFER_SIZE];
	struct rte_crypto_op *deq_ops[MC_SCHED_BUFFER_SIZE];
	uint16_t processed_ops;
	uint16_t pending_enq_ops = 0;
	uint16_t pending_enq_ops_idx = 0;
	uint16_t pending_deq_ops = 0;
	uint16_t pending_deq_ops_idx = 0;
	uint16_t inflight_ops = 0;
	const uint8_t reordering_enabled = sched_ctx->reordering_enabled;

	for (i = 0; i < (int)sched_ctx->nb_wc; i++) {
		if (sched_ctx->wc_pool[i] == core_id) {
			worker_idx = i;
			break;
		}
	}
	if (worker_idx == -1) {
		CS_LOG_ERR("worker on core %u: cannot find worker index!", core_id);
		return -1;
	}

	slave = &sched_ctx->slaves[worker_idx];
	enq_ring = mc_ctx->sched_enq_ring[worker_idx];
	deq_ring = mc_ctx->sched_deq_ring[worker_idx];

	while (!mc_ctx->stop_signal) {
		if (pending_enq_ops) {
			processed_ops =
				rte_cryptodev_enqueue_burst(slave->dev_id,
					slave->qp_id, &enq_ops[pending_enq_ops_idx],
					pending_enq_ops);
			pending_enq_ops -= processed_ops;
			pending_enq_ops_idx += processed_ops;
			inflight_ops += processed_ops;
		} else {
			processed_ops = rte_ring_dequeue_burst(enq_ring, (void *)enq_ops,
							MC_SCHED_BUFFER_SIZE, NULL);
			if (processed_ops) {
				pending_enq_ops_idx = rte_cryptodev_enqueue_burst(
							slave->dev_id, slave->qp_id,
							enq_ops, processed_ops);
				pending_enq_ops = processed_ops - pending_enq_ops_idx;
				inflight_ops += pending_enq_ops_idx;
			}
		}

		if (pending_deq_ops) {
			processed_ops = rte_ring_enqueue_burst(
					deq_ring, (void *)&deq_ops[pending_deq_ops_idx],
							pending_deq_ops, NULL);
			pending_deq_ops -= processed_ops;
			pending_deq_ops_idx += processed_ops;
		} else if (inflight_ops) {
			processed_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
					slave->qp_id, deq_ops, MC_SCHED_BUFFER_SIZE);
			if (processed_ops) {
				inflight_ops -= processed_ops;
				if (reordering_enabled) {
					uint16_t j;

					for (j = 0; j < processed_ops; j++) {
						deq_ops[j]->status |=
							CRYPTO_OP_STATUS_BIT_COMPLETE;
					}
				} else {
					pending_deq_ops_idx = rte_ring_enqueue_burst(
						deq_ring, (void *)deq_ops, processed_ops,
						NULL);
					pending_deq_ops = processed_ops -
								pending_deq_ops_idx;
				}
			}
		}

		rte_pause();
	}

	return 0;
}

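/**
 * Start the scheduler: launch the worker loop on every configured worker
 * lcore, install the ordering or non-ordering enqueue/dequeue burst
 * functions, and (re)initialize each queue pair's slave table so that
 * scheduler queue pair i maps to queue pair i on every slave.
 */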
static int
scheduler_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
	uint16_t i;

	mc_ctx->stop_signal = 0;

	for (i = 0; i < sched_ctx->nb_wc; i++)
		rte_eal_remote_launch(
			(lcore_function_t *)mc_scheduler_worker, dev,
					sched_ctx->wc_pool[i]);

	if (sched_ctx->reordering_enabled) {
		dev->enqueue_burst = &schedule_enqueue_ordering;
		dev->dequeue_burst = &schedule_dequeue_ordering;
	} else {
		dev->enqueue_burst = &schedule_enqueue;
		dev->dequeue_burst = &schedule_dequeue;
	}

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
		struct mc_scheduler_qp_ctx *mc_qp_ctx =
				qp_ctx->private_qp_ctx;
		uint32_t j;

		memset(mc_qp_ctx->slaves, 0,
				RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES *
				sizeof(struct scheduler_slave));
		for (j = 0; j < sched_ctx->nb_slaves; j++) {
			mc_qp_ctx->slaves[j].dev_id =
					sched_ctx->slaves[j].dev_id;
			mc_qp_ctx->slaves[j].qp_id = i;
		}

		mc_qp_ctx->nb_slaves = sched_ctx->nb_slaves;

		mc_qp_ctx->last_enq_worker_idx = 0;
		mc_qp_ctx->last_deq_worker_idx = 0;
	}

	return 0;
}

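/** Stop the scheduler: set the stop signal and wait for all worker lcores */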
static int
scheduler_stop(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
	uint16_t i;

	mc_ctx->stop_signal = 1;

	for (i = 0; i < sched_ctx->nb_wc; i++)
		rte_eal_wait_lcore(sched_ctx->wc_pool[i]);

	return 0;
}

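/**
 * Allocate the per queue pair private context and link it to the shared
 * multi-core scheduler context.
 */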
static int
scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
	struct mc_scheduler_qp_ctx *mc_qp_ctx;
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;

	mc_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*mc_qp_ctx), 0,
			rte_socket_id());
	if (!mc_qp_ctx) {
		CS_LOG_ERR("failed to allocate memory for private queue pair");
		return -ENOMEM;
	}

	mc_qp_ctx->mc_private_ctx = mc_ctx;
	qp_ctx->private_qp_ctx = (void *)mc_qp_ctx;

	return 0;
}

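/**
 * Allocate the scheduler-wide private context and create (or look up, if
 * they already exist) one SP/SC enqueue ring and one dequeue ring per
 * worker core. On failure, free any rings already created.
 */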
static int
scheduler_create_private_ctx(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct mc_scheduler_ctx *mc_ctx = NULL;
	uint16_t i;

	if (sched_ctx->private_ctx) {
		rte_free(sched_ctx->private_ctx);
		sched_ctx->private_ctx = NULL;
	}

	mc_ctx = rte_zmalloc_socket(NULL, sizeof(struct mc_scheduler_ctx), 0,
			rte_socket_id());
	if (!mc_ctx) {
		CS_LOG_ERR("failed to allocate memory");
		return -ENOMEM;
	}

	mc_ctx->num_workers = sched_ctx->nb_wc;
	for (i = 0; i < sched_ctx->nb_wc; i++) {
		char r_name[16];

		snprintf(r_name, sizeof(r_name), MC_SCHED_ENQ_RING_NAME_PREFIX "%u", i);
		mc_ctx->sched_enq_ring[i] = rte_ring_lookup(r_name);
		if (!mc_ctx->sched_enq_ring[i]) {
			mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name,
						PER_SLAVE_BUFF_SIZE,
						rte_socket_id(),
						RING_F_SC_DEQ | RING_F_SP_ENQ);
			if (!mc_ctx->sched_enq_ring[i]) {
				CS_LOG_ERR("Cannot create ring for worker %u",
					   i);
				goto exit;
			}
		}
		snprintf(r_name, sizeof(r_name), MC_SCHED_DEQ_RING_NAME_PREFIX "%u", i);
		mc_ctx->sched_deq_ring[i] = rte_ring_lookup(r_name);
		if (!mc_ctx->sched_deq_ring[i]) {
			mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name,
						PER_SLAVE_BUFF_SIZE,
						rte_socket_id(),
						RING_F_SC_DEQ | RING_F_SP_ENQ);
			if (!mc_ctx->sched_deq_ring[i]) {
				CS_LOG_ERR("Cannot create ring for worker %u",
					   i);
				goto exit;
			}
		}
	}

	sched_ctx->private_ctx = (void *)mc_ctx;

	return 0;

exit:
	for (i = 0; i < sched_ctx->nb_wc; i++) {
		rte_ring_free(mc_ctx->sched_enq_ring[i]);
		rte_ring_free(mc_ctx->sched_deq_ring[i]);
	}
	rte_free(mc_ctx);

	return -1;
}

struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
	slave_attach,
	slave_detach,
	scheduler_start,
	scheduler_stop,
	scheduler_config_qp,
	scheduler_create_private_ctx,
	NULL,	/* option_set */
	NULL	/* option_get */
};

struct rte_cryptodev_scheduler mc_scheduler = {
		.name = "multicore-scheduler",
		.description = "scheduler which runs bursts across multiple CPU cores",
		.mode = CDEV_SCHED_MODE_MULTICORE,
		.ops = &scheduler_mc_ops
};

struct rte_cryptodev_scheduler *multicore_scheduler = &mc_scheduler;