/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#ifndef _SCHEDULER_PMD_PRIVATE_H
#define _SCHEDULER_PMD_PRIVATE_H

#include <rte_log.h>
#include <rte_ring.h>
#include <rte_ring_peek.h>

#include "rte_cryptodev_scheduler.h"
#define CRYPTODEV_NAME_SCHEDULER_PMD	crypto_scheduler
/**< Scheduler Crypto PMD device name */

#define PER_SLAVE_BUFF_SIZE			(256)
/**< Size of the per-slave crypto op buffer used by scheduling modes */

extern int scheduler_logtype_driver;
#define CR_SCHED_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, scheduler_logtype_driver,		\
			"%s() line %u: " fmt "\n", __func__, __LINE__, ##args)
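
/*
 * Usage example (illustrative only; the slave fields shown are whatever the
 * caller has at hand): the macro prepends the function name and line number,
 * so a call such as
 *
 *	CR_SCHED_LOG(WARNING, "slave %u qp %u not configured",
 *			slave->dev_id, slave->qp_id);
 *
 * expands to rte_log() at level RTE_LOG_WARNING on scheduler_logtype_driver.
 */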

struct scheduler_slave {
	uint8_t dev_id;
	/**< Crypto device ID of the slave */
	uint16_t qp_id;
	/**< Queue pair ID used on the slave device */
	uint32_t nb_inflight_cops;
	/**< Number of crypto ops enqueued to the slave but not yet dequeued */

	uint8_t driver_id;
	/**< Driver ID of the slave's crypto PMD */
};

struct scheduler_ctx {
	void *private_ctx;
	/**< private scheduler context pointer */

	struct rte_cryptodev_capabilities *capabilities;
	/**< Capabilities advertised by the scheduler, synced from its slaves */
	uint32_t nb_capabilities;
	/**< Number of entries in capabilities */

	uint32_t max_nb_queue_pairs;
	/**< Maximum number of queue pairs the scheduler supports */

	struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
	/**< Attached slave devices */
	uint32_t nb_slaves;
	/**< Number of slaves currently attached */

	enum rte_cryptodev_scheduler_mode mode;
	/**< Active scheduling mode */

	struct rte_cryptodev_scheduler_ops ops;
	/**< Enqueue/dequeue callbacks of the active scheduling mode */

	uint8_t reordering_enabled;
	/**< Non-zero when crypto op reordering is enabled */

	char name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
	char description[RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN];
	uint16_t wc_pool[RTE_MAX_LCORE];
	/**< Worker core IDs used by the multi-core scheduling mode */
	uint16_t nb_wc;
	/**< Number of worker cores in wc_pool */

	char *init_slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
	/**< Slave names passed as device arguments, attached at init time */
	int nb_init_slaves;
} __rte_cache_aligned;

struct scheduler_qp_ctx {
	void *private_qp_ctx;
	/**< Scheduling-mode-specific queue pair context */

	uint32_t max_nb_objs;
	/**< Maximum number of crypto ops the queue pair can hold */

	struct rte_ring *order_ring;
	/**< Ring used to restore the original op order when reordering is enabled */
	uint32_t seqn;
	/**< Sequence number counter for enqueued ops */
} __rte_cache_aligned;
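
/*
 * Illustrative sketch (the ring name, size and socket_id are assumptions,
 * not part of this header): the order ring is typically a single-producer/
 * single-consumer ring created at queue pair setup, roughly as:
 *
 *	qp_ctx->order_ring = rte_ring_create("sched_order_ring_qp0",
 *			rte_align32pow2(qp_ctx->max_nb_objs), socket_id,
 *			RING_F_SP_ENQ | RING_F_SC_DEQ);
 *
 * SP/SC flags match the rte_ring_sp_enqueue_burst() and ring-peek calls
 * used by the helpers below.
 */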

extern uint8_t cryptodev_scheduler_driver_id;

/** Return how many of nb_ops can be enqueued without filling order_ring */
static __rte_always_inline uint16_t
get_max_enqueue_order_count(struct rte_ring *order_ring, uint16_t nb_ops)
{
	uint32_t count = rte_ring_free_count(order_ring);

	return count > nb_ops ? nb_ops : count;
}

/** Append a burst of ops to the order ring (single-producer enqueue) */
static __rte_always_inline void
scheduler_order_insert(struct rte_ring *order_ring,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	rte_ring_sp_enqueue_burst(order_ring, (void **)ops, nb_ops, NULL);
}
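
/*
 * Illustrative sketch (slave and order_ring are assumed to come from the
 * caller's qp context): an enqueue path of a scheduling mode would first cap
 * the burst to the order ring's free space, enqueue to a slave, then record
 * the ops so scheduler_order_drain() can later release them in sequence:
 *
 *	nb_ops = get_max_enqueue_order_count(order_ring, nb_ops);
 *	processed = rte_cryptodev_enqueue_burst(slave->dev_id, slave->qp_id,
 *			ops, nb_ops);
 *	scheduler_order_insert(order_ring, ops, processed);
 */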

/**
 * Dequeue the leading run of completed crypto ops from the order ring.
 * Ops are peeked in enqueue order; draining stops at the first op still in
 * RTE_CRYPTO_OP_STATUS_NOT_PROCESSED, so callers only ever receive ops in
 * their original order. Unfinished ops stay in the ring for the next drain.
 */
static __rte_always_inline uint16_t
scheduler_order_drain(struct rte_ring *order_ring,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rte_crypto_op *op;
	uint32_t nb_objs, nb_ops_to_deq;

	nb_objs = rte_ring_dequeue_burst_start(order_ring, (void **)ops,
		nb_ops, NULL);
	if (nb_objs == 0)
		return 0;

	for (nb_ops_to_deq = 0; nb_ops_to_deq != nb_objs; nb_ops_to_deq++) {
		op = ops[nb_ops_to_deq];
		if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
			break;
	}

	/* Commit only the completed prefix; the rest remain in the ring */
	rte_ring_dequeue_finish(order_ring, nb_ops_to_deq);
	return nb_ops_to_deq;
}
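
/*
 * Illustrative sketch (the loop variables and the scratch deq_ops buffer are
 * assumptions): a matching dequeue path first polls every slave so finished
 * ops leave RTE_CRYPTO_OP_STATUS_NOT_PROCESSED, then drains the order ring,
 * which hands back only the in-order prefix of completed ops:
 *
 *	for (i = 0; i < sched_ctx->nb_slaves; i++)
 *		rte_cryptodev_dequeue_burst(slaves[i].dev_id, slaves[i].qp_id,
 *				deq_ops, PER_SLAVE_BUFF_SIZE);
 *	return scheduler_order_drain(order_ring, ops, nb_ops);
 */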

/** device specific operations function pointer structure */
extern struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops;

#endif /* _SCHEDULER_PMD_PRIVATE_H */