/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */
#include <unistd.h>

#include <cryptodev_pmd.h>
#include <rte_malloc.h>

#include "rte_cryptodev_scheduler_operations.h"
#include "scheduler_pmd_private.h"

#define MC_SCHED_ENQ_RING_NAME_PREFIX "MCS_ENQR_"
#define MC_SCHED_DEQ_RING_NAME_PREFIX "MCS_DEQR_"

#define MC_SCHED_BUFFER_SIZE 32

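/*
 * Bit set in rte_crypto_op.status by a worker lcore once an op has been
 * processed, and cleared again by the ordered dequeue path. Only used when
 * reordering is enabled.
 */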
#define CRYPTO_OP_STATUS_BIT_COMPLETE 0x80

/** multi-core scheduler context */
struct mc_scheduler_ctx {
	uint32_t num_workers; /**< Number of worker lcores polling */
	uint32_t stop_signal; /**< Set to make the worker lcores exit their loops */

	/** Per worker-lcore rings carrying ops between queue pairs and workers */
	struct rte_ring *sched_enq_ring[RTE_MAX_LCORE];
	struct rte_ring *sched_deq_ring[RTE_MAX_LCORE];
};

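/** multi-core scheduler queue pair context */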
284c07e055SKirill Rybalchenko struct mc_scheduler_qp_ctx {
2985b00824SAdam Dybkowski struct scheduler_worker workers[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
3085b00824SAdam Dybkowski uint32_t nb_workers;
314c07e055SKirill Rybalchenko
324c07e055SKirill Rybalchenko uint32_t last_enq_worker_idx;
334c07e055SKirill Rybalchenko uint32_t last_deq_worker_idx;
344c07e055SKirill Rybalchenko
354c07e055SKirill Rybalchenko struct mc_scheduler_ctx *mc_private_ctx;
364c07e055SKirill Rybalchenko };
374c07e055SKirill Rybalchenko
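/*
 * Front-end enqueue: distribute the burst across the per worker-lcore
 * enqueue rings in round-robin fashion, resuming from where the previous
 * call stopped.
 */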
static uint16_t
schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct mc_scheduler_qp_ctx *mc_qp_ctx =
			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
	struct mc_scheduler_ctx *mc_ctx = mc_qp_ctx->mc_private_ctx;
	uint32_t worker_idx = mc_qp_ctx->last_enq_worker_idx;
	uint16_t i, processed_ops = 0;

	if (unlikely(nb_ops == 0))
		return 0;

	for (i = 0; i < mc_ctx->num_workers && nb_ops != 0; i++) {
		struct rte_ring *enq_ring = mc_ctx->sched_enq_ring[worker_idx];
		uint16_t nb_queue_ops = rte_ring_enqueue_burst(enq_ring,
				(void *)(&ops[processed_ops]), nb_ops, NULL);

		nb_ops -= nb_queue_ops;
		processed_ops += nb_queue_ops;

		if (++worker_idx == mc_ctx->num_workers)
			worker_idx = 0;
	}
	mc_qp_ctx->last_enq_worker_idx = worker_idx;

	return processed_ops;
}

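/*
 * Ordered enqueue: limit the burst to what the order ring can still hold,
 * enqueue through schedule_enqueue() and record the accepted ops in the
 * order ring so they can be returned to the application in order.
 */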
static uint16_t
schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct rte_ring *order_ring =
			((struct scheduler_qp_ctx *)qp)->order_ring;
	uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
			nb_ops);
	uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
			nb_ops_to_enq);

	scheduler_order_insert(order_ring, ops, nb_ops_enqd);

	return nb_ops_enqd;
}

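/*
 * Front-end dequeue: collect processed ops from the per worker-lcore
 * dequeue rings in round-robin fashion, resuming from where the previous
 * call stopped.
 */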
static uint16_t
schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct mc_scheduler_qp_ctx *mc_qp_ctx =
			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
	struct mc_scheduler_ctx *mc_ctx = mc_qp_ctx->mc_private_ctx;
	uint32_t worker_idx = mc_qp_ctx->last_deq_worker_idx;
	uint16_t i, processed_ops = 0;

	for (i = 0; i < mc_ctx->num_workers && nb_ops != 0; i++) {
		struct rte_ring *deq_ring = mc_ctx->sched_deq_ring[worker_idx];
		uint16_t nb_deq_ops = rte_ring_dequeue_burst(deq_ring,
				(void *)(&ops[processed_ops]), nb_ops, NULL);

		nb_ops -= nb_deq_ops;
		processed_ops += nb_deq_ops;
		if (++worker_idx == mc_ctx->num_workers)
			worker_idx = 0;
	}

	mc_qp_ctx->last_deq_worker_idx = worker_idx;

	return processed_ops;
}

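/*
 * Ordered dequeue: peek at the head of the order ring and return only the
 * leading run of ops that the worker lcores have already marked complete,
 * clearing the completion bit as they are handed back.
 */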
static uint16_t
schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct rte_ring *order_ring =
			((struct scheduler_qp_ctx *)qp)->order_ring;
	struct rte_crypto_op *op;
	uint32_t nb_objs, nb_ops_to_deq;

	nb_objs = rte_ring_dequeue_burst_start(order_ring, (void **)ops,
			nb_ops, NULL);
	if (nb_objs == 0)
		return 0;

	for (nb_ops_to_deq = 0; nb_ops_to_deq != nb_objs; nb_ops_to_deq++) {
		op = ops[nb_ops_to_deq];
		if (!(op->status & CRYPTO_OP_STATUS_BIT_COMPLETE))
			break;
		op->status &= ~CRYPTO_OP_STATUS_BIT_COMPLETE;
	}

	rte_ring_dequeue_finish(order_ring, nb_ops_to_deq);
	return nb_ops_to_deq;
}

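/*
 * Worker attach/detach hooks: the multi-core scheduler keeps no per-worker
 * state outside its queue pair contexts, so there is nothing to do here.
 */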
1344c07e055SKirill Rybalchenko static int
worker_attach(__rte_unused struct rte_cryptodev * dev,__rte_unused uint8_t worker_id)13585b00824SAdam Dybkowski worker_attach(__rte_unused struct rte_cryptodev *dev,
13685b00824SAdam Dybkowski __rte_unused uint8_t worker_id)
1374c07e055SKirill Rybalchenko {
1384c07e055SKirill Rybalchenko return 0;
1394c07e055SKirill Rybalchenko }
1404c07e055SKirill Rybalchenko
1414c07e055SKirill Rybalchenko static int
worker_detach(__rte_unused struct rte_cryptodev * dev,__rte_unused uint8_t worker_id)14285b00824SAdam Dybkowski worker_detach(__rte_unused struct rte_cryptodev *dev,
14385b00824SAdam Dybkowski __rte_unused uint8_t worker_id)
1444c07e055SKirill Rybalchenko {
1454c07e055SKirill Rybalchenko return 0;
1464c07e055SKirill Rybalchenko }
1474c07e055SKirill Rybalchenko
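/*
 * Main loop run on each worker lcore: pull ops from this lcore's enqueue
 * ring, submit them to the attached worker cryptodev, dequeue the results
 * and either mark them complete (reordering mode) or push them onto this
 * lcore's dequeue ring for the application to collect.
 */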
static int
mc_scheduler_worker(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
	struct rte_ring *enq_ring;
	struct rte_ring *deq_ring;
	uint32_t core_id = rte_lcore_id();
	int i, worker_idx = -1;
	struct scheduler_worker *worker;
	struct rte_crypto_op *enq_ops[MC_SCHED_BUFFER_SIZE];
	struct rte_crypto_op *deq_ops[MC_SCHED_BUFFER_SIZE];
	uint16_t processed_ops;
	uint16_t pending_enq_ops = 0;
	uint16_t pending_enq_ops_idx = 0;
	uint16_t pending_deq_ops = 0;
	uint16_t pending_deq_ops_idx = 0;
	uint16_t inflight_ops = 0;
	const uint8_t reordering_enabled = sched_ctx->reordering_enabled;

	/* Find this lcore's index within the configured worker core pool. */
	for (i = 0; i < (int)sched_ctx->nb_wc; i++) {
		if (sched_ctx->wc_pool[i] == core_id) {
			worker_idx = i;
			break;
		}
	}
	if (worker_idx == -1) {
		CR_SCHED_LOG(ERR, "worker on core %u: cannot find worker index!",
			core_id);
		return -1;
	}

	worker = &sched_ctx->workers[worker_idx];
	enq_ring = mc_ctx->sched_enq_ring[worker_idx];
	deq_ring = mc_ctx->sched_deq_ring[worker_idx];

	while (!mc_ctx->stop_signal) {
		if (pending_enq_ops) {
			/* Retry ops the worker PMD did not accept last time. */
			scheduler_set_worker_sessions(
				&enq_ops[pending_enq_ops_idx], pending_enq_ops,
				worker_idx);
			processed_ops =
				rte_cryptodev_enqueue_burst(worker->dev_id,
					worker->qp_id,
					&enq_ops[pending_enq_ops_idx],
					pending_enq_ops);
			if (processed_ops < pending_enq_ops)
				scheduler_retrieve_sessions(
					&enq_ops[pending_enq_ops_idx +
						processed_ops],
					pending_enq_ops - processed_ops);
			pending_enq_ops -= processed_ops;
			pending_enq_ops_idx += processed_ops;
			inflight_ops += processed_ops;
		} else {
			/* Pull a fresh burst from this lcore's enqueue ring. */
			processed_ops = rte_ring_dequeue_burst(enq_ring,
				(void *)enq_ops, MC_SCHED_BUFFER_SIZE, NULL);
			if (processed_ops) {
				scheduler_set_worker_sessions(enq_ops,
					processed_ops, worker_idx);
				pending_enq_ops_idx = rte_cryptodev_enqueue_burst(
						worker->dev_id, worker->qp_id,
						enq_ops, processed_ops);
				if (pending_enq_ops_idx < processed_ops)
					scheduler_retrieve_sessions(
						enq_ops + pending_enq_ops_idx,
						processed_ops -
						pending_enq_ops_idx);
				pending_enq_ops = processed_ops - pending_enq_ops_idx;
				inflight_ops += pending_enq_ops_idx;
			}
		}

		if (pending_deq_ops) {
			/* Flush results that did not fit in the dequeue ring. */
			processed_ops = rte_ring_enqueue_burst(
				deq_ring, (void *)&deq_ops[pending_deq_ops_idx],
				pending_deq_ops, NULL);
			pending_deq_ops -= processed_ops;
			pending_deq_ops_idx += processed_ops;
		} else if (inflight_ops) {
			/* Collect processed ops back from the worker PMD. */
			processed_ops = rte_cryptodev_dequeue_burst(
				worker->dev_id, worker->qp_id, deq_ops,
				MC_SCHED_BUFFER_SIZE);
			if (processed_ops) {
				scheduler_retrieve_sessions(deq_ops,
					processed_ops);
				inflight_ops -= processed_ops;
				if (reordering_enabled) {
					uint16_t j;

					for (j = 0; j < processed_ops; j++) {
						deq_ops[j]->status |=
							CRYPTO_OP_STATUS_BIT_COMPLETE;
					}
				} else {
					pending_deq_ops_idx = rte_ring_enqueue_burst(
						deq_ring, (void *)deq_ops,
						processed_ops, NULL);
					pending_deq_ops = processed_ops -
						pending_deq_ops_idx;
				}
			}
		}

		rte_pause();
	}

	return 0;
}

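/*
 * Start hook: launch mc_scheduler_worker() on every configured worker
 * lcore, install the (ordered or unordered) enqueue/dequeue burst
 * functions and snapshot the attached workers into each queue pair context.
 */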
static int
scheduler_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
	uint16_t i;

	mc_ctx->stop_signal = 0;

	for (i = 0; i < sched_ctx->nb_wc; i++)
		rte_eal_remote_launch(
			(lcore_function_t *)mc_scheduler_worker, dev,
			sched_ctx->wc_pool[i]);

	if (sched_ctx->reordering_enabled) {
		dev->enqueue_burst = &schedule_enqueue_ordering;
		dev->dequeue_burst = &schedule_dequeue_ordering;
	} else {
		dev->enqueue_burst = &schedule_enqueue;
		dev->dequeue_burst = &schedule_dequeue;
	}

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
		struct mc_scheduler_qp_ctx *mc_qp_ctx =
				qp_ctx->private_qp_ctx;
		uint32_t j;

		memset(mc_qp_ctx->workers, 0,
				RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS *
				sizeof(struct scheduler_worker));
		for (j = 0; j < sched_ctx->nb_workers; j++) {
			mc_qp_ctx->workers[j].dev_id =
					sched_ctx->workers[j].dev_id;
			mc_qp_ctx->workers[j].qp_id = i;
		}

		mc_qp_ctx->nb_workers = sched_ctx->nb_workers;

		mc_qp_ctx->last_enq_worker_idx = 0;
		mc_qp_ctx->last_deq_worker_idx = 0;
	}

	return 0;
}

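/*
 * Stop hook: raise the stop signal and wait for every worker lcore to
 * return from mc_scheduler_worker().
 */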
static int
scheduler_stop(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
	uint16_t i;

	mc_ctx->stop_signal = 1;

	for (i = 0; i < sched_ctx->nb_wc; i++)
		rte_eal_wait_lcore(sched_ctx->wc_pool[i]);

	return 0;
}

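/*
 * Queue pair setup hook: allocate the per queue pair context and link it
 * to the shared multi-core scheduler context.
 */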
static int
scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
	struct mc_scheduler_qp_ctx *mc_qp_ctx;
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;

	mc_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*mc_qp_ctx), 0,
			rte_socket_id());
	if (!mc_qp_ctx) {
		CR_SCHED_LOG(ERR, "failed to allocate memory for private queue pair");
		return -ENOMEM;
	}

	mc_qp_ctx->mc_private_ctx = mc_ctx;
	qp_ctx->private_qp_ctx = (void *)mc_qp_ctx;

	return 0;
}

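/*
 * Create the scheduler private context: allocate the multi-core context
 * and create, or reuse if they already exist, one enqueue and one dequeue
 * ring per configured worker lcore.
 */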
static int
scheduler_create_private_ctx(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct mc_scheduler_ctx *mc_ctx = NULL;
	uint16_t i;

	if (sched_ctx->private_ctx) {
		rte_free(sched_ctx->private_ctx);
		sched_ctx->private_ctx = NULL;
	}

	mc_ctx = rte_zmalloc_socket(NULL, sizeof(struct mc_scheduler_ctx), 0,
			rte_socket_id());
	if (!mc_ctx) {
		CR_SCHED_LOG(ERR, "failed to allocate memory");
		return -ENOMEM;
	}

	mc_ctx->num_workers = sched_ctx->nb_wc;
	for (i = 0; i < sched_ctx->nb_wc; i++) {
		char r_name[16];

		snprintf(r_name, sizeof(r_name), MC_SCHED_ENQ_RING_NAME_PREFIX
				"%u_%u", dev->data->dev_id, i);
		mc_ctx->sched_enq_ring[i] = rte_ring_lookup(r_name);
		if (!mc_ctx->sched_enq_ring[i]) {
			mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name,
					PER_WORKER_BUFF_SIZE,
					rte_socket_id(),
					RING_F_SC_DEQ | RING_F_SP_ENQ);
			if (!mc_ctx->sched_enq_ring[i]) {
				CR_SCHED_LOG(ERR, "Cannot create ring for worker %u",
					i);
				goto exit;
			}
		}
		snprintf(r_name, sizeof(r_name), MC_SCHED_DEQ_RING_NAME_PREFIX
				"%u_%u", dev->data->dev_id, i);
		mc_ctx->sched_deq_ring[i] = rte_ring_lookup(r_name);
		if (!mc_ctx->sched_deq_ring[i]) {
			mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name,
					PER_WORKER_BUFF_SIZE,
					rte_socket_id(),
					RING_F_SC_DEQ | RING_F_SP_ENQ);
			if (!mc_ctx->sched_deq_ring[i]) {
				CR_SCHED_LOG(ERR, "Cannot create ring for worker %u",
					i);
				goto exit;
			}
		}
	}

	sched_ctx->private_ctx = (void *)mc_ctx;

	return 0;

exit:
	for (i = 0; i < sched_ctx->nb_wc; i++) {
		rte_ring_free(mc_ctx->sched_enq_ring[i]);
		rte_ring_free(mc_ctx->sched_deq_ring[i]);
	}
	rte_free(mc_ctx);

	return -1;
}

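/* Operation set registered with the generic scheduler PMD framework. */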
static struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
	worker_attach,
	worker_detach,
	scheduler_start,
	scheduler_stop,
	scheduler_config_qp,
	scheduler_create_private_ctx,
	NULL,	/* option_set */
	NULL	/* option_get */
};

static struct rte_cryptodev_scheduler mc_scheduler = {
	.name = "multicore-scheduler",
	.description = "scheduler which will run burst across multiple cpu cores",
	.mode = CDEV_SCHED_MODE_MULTICORE,
	.ops = &scheduler_mc_ops
};

struct rte_cryptodev_scheduler *crypto_scheduler_multicore = &mc_scheduler;