/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <cryptodev_pmd.h>
#include <rte_malloc.h>

#include "rte_cryptodev_scheduler_operations.h"
#include "scheduler_pmd_private.h"

struct rr_scheduler_qp_ctx {
	struct scheduler_worker workers[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
	uint32_t nb_workers;

	uint32_t last_enq_worker_idx;
	uint32_t last_deq_worker_idx;
};

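/*
 * Enqueue a burst of crypto ops on the worker selected by last_enq_worker_idx,
 * then advance that index so successive bursts are spread over the workers in
 * round-robin order. Ops the worker cannot accept get their original sessions
 * restored before the number of accepted ops is returned.
 */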
static uint16_t
schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rr_scheduler_qp_ctx *rr_qp_ctx =
			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
	uint32_t worker_idx = rr_qp_ctx->last_enq_worker_idx;
	struct scheduler_worker *worker = &rr_qp_ctx->workers[worker_idx];
	uint16_t processed_ops;

	if (unlikely(nb_ops == 0))
		return 0;

	scheduler_set_worker_sessions(ops, nb_ops, worker_idx);
	processed_ops = rte_cryptodev_enqueue_burst(worker->dev_id,
			worker->qp_id, ops, nb_ops);
	if (processed_ops < nb_ops)
		scheduler_retrieve_sessions(ops + processed_ops,
			nb_ops - processed_ops);

	worker->nb_inflight_cops += processed_ops;

	rr_qp_ctx->last_enq_worker_idx += 1;
	rr_qp_ctx->last_enq_worker_idx %= rr_qp_ctx->nb_workers;

	return processed_ops;
}

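/*
 * Ordering-aware enqueue: cap the burst to what the queue pair's order ring
 * can still hold, enqueue through schedule_enqueue(), and record the accepted
 * ops in the order ring so they can later be drained in their original order.
 */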
static uint16_t
schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct rte_ring *order_ring =
			((struct scheduler_qp_ctx *)qp)->order_ring;
	uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
			nb_ops);
	uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
			nb_ops_to_enq);

	scheduler_order_insert(order_ring, ops, nb_ops_enqd);

	return nb_ops_enqd;
}

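/*
 * Dequeue from the worker selected by last_deq_worker_idx. If that worker has
 * no inflight ops, scan forward (wrapping around) for one that does, returning
 * 0 if none is found. Dequeued ops get their original sessions restored, the
 * worker's inflight count is decremented and the index advances to the next
 * worker.
 */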
static uint16_t
schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rr_scheduler_qp_ctx *rr_qp_ctx =
			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
	struct scheduler_worker *worker;
	uint32_t last_worker_idx = rr_qp_ctx->last_deq_worker_idx;
	uint16_t nb_deq_ops;

	if (unlikely(rr_qp_ctx->workers[last_worker_idx].nb_inflight_cops
			== 0)) {
		do {
			last_worker_idx += 1;

			if (unlikely(last_worker_idx >= rr_qp_ctx->nb_workers))
				last_worker_idx = 0;
			/* looped back: no worker has inflight cops in this queue */
			if (last_worker_idx == rr_qp_ctx->last_deq_worker_idx)
				return 0;
		} while (rr_qp_ctx->workers[last_worker_idx].nb_inflight_cops
				== 0);
	}

	worker = &rr_qp_ctx->workers[last_worker_idx];

	nb_deq_ops = rte_cryptodev_dequeue_burst(worker->dev_id,
			worker->qp_id, ops, nb_ops);
	scheduler_retrieve_sessions(ops, nb_deq_ops);
	last_worker_idx += 1;
	last_worker_idx %= rr_qp_ctx->nb_workers;

	rr_qp_ctx->last_deq_worker_idx = last_worker_idx;

	worker->nb_inflight_cops -= nb_deq_ops;

	return nb_deq_ops;
}

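/*
 * Ordering-aware dequeue: pull completed ops back from the workers first, then
 * drain the order ring so ops are handed to the caller in the order in which
 * they were originally enqueued.
 */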
static uint16_t
schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct rte_ring *order_ring =
			((struct scheduler_qp_ctx *)qp)->order_ring;

	schedule_dequeue(qp, ops, nb_ops);

	return scheduler_order_drain(order_ring, ops, nb_ops);
}

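/* Round-robin keeps no per-worker state, so attach/detach need no work. */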
static int
worker_attach(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint8_t worker_id)
{
	return 0;
}

static int
worker_detach(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint8_t worker_id)
{
	return 0;
}

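/*
 * Install the ordering or non-ordering burst functions depending on whether
 * reordering is enabled, then rebuild each queue pair's private worker table
 * from the scheduler context and reset the round-robin indices.
 */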
static int
scheduler_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint16_t i;

	if (sched_ctx->reordering_enabled) {
		dev->enqueue_burst = &schedule_enqueue_ordering;
		dev->dequeue_burst = &schedule_dequeue_ordering;
	} else {
		dev->enqueue_burst = &schedule_enqueue;
		dev->dequeue_burst = &schedule_dequeue;
	}

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
		struct rr_scheduler_qp_ctx *rr_qp_ctx =
				qp_ctx->private_qp_ctx;
		uint32_t j;

		memset(rr_qp_ctx->workers, 0,
				RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS *
				sizeof(struct scheduler_worker));
		for (j = 0; j < sched_ctx->nb_workers; j++) {
			rr_qp_ctx->workers[j].dev_id =
					sched_ctx->workers[j].dev_id;
			rr_qp_ctx->workers[j].qp_id = i;
		}

		rr_qp_ctx->nb_workers = sched_ctx->nb_workers;

		rr_qp_ctx->last_enq_worker_idx = 0;
		rr_qp_ctx->last_deq_worker_idx = 0;
	}

	return 0;
}

static int
scheduler_stop(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

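/* Allocate the per-queue-pair round-robin context on the local NUMA socket. */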
static int
scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
	struct rr_scheduler_qp_ctx *rr_qp_ctx;

	rr_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*rr_qp_ctx), 0,
			rte_socket_id());
	if (!rr_qp_ctx) {
		CR_SCHED_LOG(ERR, "failed to allocate memory for private queue pair");
		return -ENOMEM;
	}

	qp_ctx->private_qp_ctx = (void *)rr_qp_ctx;

	return 0;
}

static int
scheduler_create_private_ctx(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

static struct rte_cryptodev_scheduler_ops scheduler_rr_ops = {
	worker_attach,
	worker_detach,
	scheduler_start,
	scheduler_stop,
	scheduler_config_qp,
	scheduler_create_private_ctx,
	NULL,	/* option_set */
	NULL	/* option_get */
};

static struct rte_cryptodev_scheduler scheduler = {
		.name = "roundrobin-scheduler",
		.description = "scheduler which will round robin bursts across "
				"worker crypto devices",
		.mode = CDEV_SCHED_MODE_ROUNDROBIN,
		.ops = &scheduler_rr_ops
};

struct rte_cryptodev_scheduler *crypto_scheduler_roundrobin = &scheduler;
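
/*
 * Illustrative usage only (not part of the driver): an application would
 * normally select this mode on a scheduler vdev either via devargs, e.g.
 *
 *   --vdev "crypto_scheduler,worker=crypto_aesni_mb_1,worker=crypto_aesni_mb_2,mode=round-robin"
 *
 * or at runtime through the scheduler API, e.g.
 *
 *   rte_cryptodev_scheduler_mode_set(scheduler_dev_id, CDEV_SCHED_MODE_ROUNDROBIN);
 *
 * The worker vdev names and the scheduler_dev_id variable above are
 * placeholders; substitute the identifiers the application actually uses.
 */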