/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <cryptodev_pmd.h>
#include <rte_malloc.h>

#include "rte_cryptodev_scheduler_operations.h"
#include "scheduler_pmd_private.h"

#define PRIMARY_WORKER_IDX	0
#define SECONDARY_WORKER_IDX	1
#define NB_FAILOVER_WORKERS	2
#define WORKER_SWITCH_MASK	(0x01)

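/*
 * Private failover queue pair context: local copies of the primary and
 * secondary workers, plus the index of the worker to poll first on the
 * next dequeue.
 */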
struct fo_scheduler_qp_ctx {
	struct scheduler_worker primary_worker;
	struct scheduler_worker secondary_worker;
	uint8_t primary_worker_index;
	uint8_t secondary_worker_index;

	uint8_t deq_idx;
};

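/*
 * Set the given worker's sessions on the ops, enqueue the burst to that
 * worker and account for the newly in-flight ops.
 */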
static __rte_always_inline uint16_t
failover_worker_enqueue(struct scheduler_worker *worker,
		struct rte_crypto_op **ops, uint16_t nb_ops, uint8_t index)
{
	uint16_t processed_ops;

	scheduler_set_worker_sessions(ops, nb_ops, index);

	processed_ops = rte_cryptodev_enqueue_burst(worker->dev_id,
			worker->qp_id, ops, nb_ops);
	worker->nb_inflight_cops += processed_ops;

	return processed_ops;
}

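/*
 * Enqueue the burst to the primary worker first. Ops the primary cannot
 * accept get their scheduler sessions restored and are re-enqueued to
 * the secondary worker.
 */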
static uint16_t
schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct fo_scheduler_qp_ctx *qp_ctx =
			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
	uint16_t enqueued_ops;

	if (unlikely(nb_ops == 0))
		return 0;

	enqueued_ops = failover_worker_enqueue(&qp_ctx->primary_worker,
			ops, nb_ops, PRIMARY_WORKER_IDX);

	if (enqueued_ops < nb_ops) {
		scheduler_retrieve_sessions(&ops[enqueued_ops],
						nb_ops - enqueued_ops);
		enqueued_ops += failover_worker_enqueue(
				&qp_ctx->secondary_worker,
				&ops[enqueued_ops],
				nb_ops - enqueued_ops,
				SECONDARY_WORKER_IDX);
		if (enqueued_ops < nb_ops)
			scheduler_retrieve_sessions(&ops[enqueued_ops],
						nb_ops - enqueued_ops);
	}

	return enqueued_ops;
}

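/*
 * Ordered enqueue: limit the burst to the free space in the order ring,
 * enqueue as above, then record the accepted ops so they can later be
 * drained in their original order.
 */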
static uint16_t
schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct rte_ring *order_ring =
			((struct scheduler_qp_ctx *)qp)->order_ring;
	uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
			nb_ops);
	uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
			nb_ops_to_enq);

	scheduler_order_insert(order_ring, ops, nb_ops_enqd);

	return nb_ops_enqd;
}

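/*
 * Dequeue from the two workers in alternating order: poll the worker
 * selected by deq_idx, flip deq_idx, and poll the other worker only if
 * the first did not fill the burst. Scheduler sessions are restored on
 * all dequeued ops before returning.
 */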
static uint16_t
schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct fo_scheduler_qp_ctx *qp_ctx =
			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
	struct scheduler_worker *workers[NB_FAILOVER_WORKERS] = {
			&qp_ctx->primary_worker, &qp_ctx->secondary_worker};
	struct scheduler_worker *worker = workers[qp_ctx->deq_idx];
	uint16_t nb_deq_ops = 0, nb_deq_ops2 = 0;

	if (worker->nb_inflight_cops) {
		nb_deq_ops = rte_cryptodev_dequeue_burst(worker->dev_id,
			worker->qp_id, ops, nb_ops);
		worker->nb_inflight_cops -= nb_deq_ops;
	}

	qp_ctx->deq_idx = (~qp_ctx->deq_idx) & WORKER_SWITCH_MASK;

	if (nb_deq_ops == nb_ops)
		goto retrieve_sessions;

	worker = workers[qp_ctx->deq_idx];

	if (worker->nb_inflight_cops) {
		nb_deq_ops2 = rte_cryptodev_dequeue_burst(worker->dev_id,
			worker->qp_id, &ops[nb_deq_ops], nb_ops - nb_deq_ops);
		worker->nb_inflight_cops -= nb_deq_ops2;
	}

retrieve_sessions:
	scheduler_retrieve_sessions(ops, nb_deq_ops + nb_deq_ops2);

	return nb_deq_ops + nb_deq_ops2;
}

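/*
 * Ordered dequeue: dequeue from the workers as above, then drain ops
 * from the order ring in the order they were enqueued.
 */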
static uint16_t
schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct rte_ring *order_ring =
			((struct scheduler_qp_ctx *)qp)->order_ring;

	schedule_dequeue(qp, ops, nb_ops);

	return scheduler_order_drain(order_ring, ops, nb_ops);
}

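/* Worker attach/detach requires no extra handling in failover mode. */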
13337f075daSFan Zhang static int
worker_attach(__rte_unused struct rte_cryptodev * dev,__rte_unused uint8_t worker_id)13485b00824SAdam Dybkowski worker_attach(__rte_unused struct rte_cryptodev *dev,
13585b00824SAdam Dybkowski 		__rte_unused uint8_t worker_id)
13637f075daSFan Zhang {
13737f075daSFan Zhang 	return 0;
13837f075daSFan Zhang }
13937f075daSFan Zhang 
14037f075daSFan Zhang static int
worker_detach(__rte_unused struct rte_cryptodev * dev,__rte_unused uint8_t worker_id)14185b00824SAdam Dybkowski worker_detach(__rte_unused struct rte_cryptodev *dev,
14285b00824SAdam Dybkowski 		__rte_unused uint8_t worker_id)
14337f075daSFan Zhang {
14437f075daSFan Zhang 	return 0;
14537f075daSFan Zhang }
14637f075daSFan Zhang 
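/*
 * Start the scheduler: failover mode requires at least two workers.
 * Select the enqueue/dequeue handlers based on whether reordering is
 * enabled and copy the first two workers into each queue pair's
 * private context.
 */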
static int
scheduler_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint16_t i;

	if (sched_ctx->nb_workers < 2) {
		CR_SCHED_LOG(ERR, "Number of workers shall be no less than 2");
		return -ENOMEM;
	}

	if (sched_ctx->reordering_enabled) {
		dev->enqueue_burst = schedule_enqueue_ordering;
		dev->dequeue_burst = schedule_dequeue_ordering;
	} else {
		dev->enqueue_burst = schedule_enqueue;
		dev->dequeue_burst = schedule_dequeue;
	}

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		struct fo_scheduler_qp_ctx *qp_ctx =
			((struct scheduler_qp_ctx *)
				dev->data->queue_pairs[i])->private_qp_ctx;

		sched_ctx->workers[PRIMARY_WORKER_IDX].qp_id = i;
		sched_ctx->workers[SECONDARY_WORKER_IDX].qp_id = i;

		rte_memcpy(&qp_ctx->primary_worker,
				&sched_ctx->workers[PRIMARY_WORKER_IDX],
				sizeof(struct scheduler_worker));
		rte_memcpy(&qp_ctx->secondary_worker,
				&sched_ctx->workers[SECONDARY_WORKER_IDX],
				sizeof(struct scheduler_worker));
	}

	return 0;
}

static int
scheduler_stop(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

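/* Allocate the private failover context for a single queue pair. */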
static int
scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
	struct fo_scheduler_qp_ctx *fo_qp_ctx;

	fo_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*fo_qp_ctx), 0,
			rte_socket_id());
	if (!fo_qp_ctx) {
		CR_SCHED_LOG(ERR, "failed to allocate memory for private queue pair");
		return -ENOMEM;
	}

	qp_ctx->private_qp_ctx = (void *)fo_qp_ctx;

	return 0;
}

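/* No scheduler-wide private context is needed for failover mode. */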
static int
scheduler_create_private_ctx(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

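/* Failover mode operations registered with the scheduler framework. */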
static struct rte_cryptodev_scheduler_ops scheduler_fo_ops = {
	worker_attach,
	worker_detach,
	scheduler_start,
	scheduler_stop,
	scheduler_config_qp,
	scheduler_create_private_ctx,
	NULL,	/* option_set */
	NULL	/* option_get */
};

static struct rte_cryptodev_scheduler fo_scheduler = {
		.name = "failover-scheduler",
		.description = "scheduler which enqueues to the primary worker, "
				"and enqueues to the secondary worker only when "
				"the primary fails to accept the operations",
		.mode = CDEV_SCHED_MODE_FAILOVER,
		.ops = &scheduler_fo_ops
};

struct rte_cryptodev_scheduler *crypto_scheduler_failover = &fo_scheduler;