/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#ifndef _SCHEDULER_PMD_PRIVATE_H
#define _SCHEDULER_PMD_PRIVATE_H

#include "rte_cryptodev_scheduler.h"

#define CRYPTODEV_NAME_SCHEDULER_PMD	crypto_scheduler
/**< Scheduler Crypto PMD device name */

#define PER_SLAVE_BUFF_SIZE			(256)
/**< Per-slave crypto op buffer size, in number of ops */

#define CS_LOG_ERR(fmt, args...)					\
	RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n",		\
		RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),			\
		__func__, __LINE__, ## args)

#ifdef RTE_LIBRTE_CRYPTO_SCHEDULER_DEBUG
#define CS_LOG_INFO(fmt, args...)					\
	RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n",	\
		RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),			\
		__func__, __LINE__, ## args)

#define CS_LOG_DBG(fmt, args...)					\
	RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n",	\
		RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),			\
		__func__, __LINE__, ## args)
#else
#define CS_LOG_INFO(fmt, args...)
#define CS_LOG_DBG(fmt, args...)
#endif

struct scheduler_slave {
	uint8_t dev_id;
	/**< Cryptodev device ID of the slave */
	uint16_t qp_id;
	/**< Queue pair ID on the slave device */
	uint32_t nb_inflight_cops;
	/**< Number of crypto ops currently in flight on the slave */

	uint8_t driver_id;
	/**< Driver ID of the slave device */
};

struct scheduler_ctx {
	void *private_ctx;
	/**< private scheduler context pointer */

	struct rte_cryptodev_capabilities *capabilities;
	/**< capabilities supported by all attached slaves */
	uint32_t nb_capabilities;

	uint32_t max_nb_queue_pairs;

	struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
	/**< attached slave devices */
	uint32_t nb_slaves;

	enum rte_cryptodev_scheduler_mode mode;
	/**< active scheduling mode */

	struct rte_cryptodev_scheduler_ops ops;
	/**< enqueue/dequeue ops of the active scheduling mode */

	uint8_t reordering_enabled;
	/**< nonzero when crypto op reordering is enabled */

	char name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
	char description[RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN];
	uint16_t wc_pool[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
	/**< worker core IDs (multi-core mode) */
	uint16_t nb_wc;

	char *init_slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
	/**< slave names passed via devargs, attached at init time */
	int nb_init_slaves;
} __rte_cache_aligned;

struct scheduler_qp_ctx {
	void *private_qp_ctx;
	/**< private per-qp context of the scheduling mode */

	uint32_t max_nb_objs;
	/**< max number of queued objects */

	struct rte_ring *order_ring;
	/**< ring used to restore the original crypto op order */
	uint32_t seqn;
	/**< sequence number counter */
} __rte_cache_aligned;

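/*
 * Minimal sketch (a hypothetical helper, not part of the original header)
 * of how a queue pair's order ring could be created at qp-setup time. The
 * ring is single-producer/single-consumer to match scheduler_order_insert()
 * and scheduler_order_drain() below, and its size is rounded up to the
 * power of two that rte_ring_create() requires.
 */
static inline struct rte_ring *
example_order_ring_create(const char *ring_name, uint32_t nb_descriptors,
		int socket_id)
{
	return rte_ring_create(ring_name, rte_align32pow2(nb_descriptors),
			socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
}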

extern uint8_t cryptodev_driver_id;
/**< Driver ID of the scheduler PMD itself */

/** Clamp nb_ops to the number of free slots in the order ring, so an
 * ordered enqueue can never overflow it.
 */
static __rte_always_inline uint16_t
get_max_enqueue_order_count(struct rte_ring *order_ring, uint16_t nb_ops)
{
	uint32_t count = rte_ring_free_count(order_ring);

	return count > nb_ops ? nb_ops : count;
}

/** Record freshly enqueued ops in the order ring so they can later be
 * drained back to the application in their original order.
 */
static __rte_always_inline void
scheduler_order_insert(struct rte_ring *order_ring,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	rte_ring_sp_enqueue_burst(order_ring, (void **)ops, nb_ops, NULL);
}

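/*
 * Illustrative sketch (not part of the original header) of how a scheduling
 * mode's ordered enqueue path typically combines the two helpers above: the
 * burst is first clamped so every op fits in the order ring, then submitted
 * to a slave, and finally the successfully enqueued prefix is recorded in
 * the order ring. All names here are hypothetical; the real implementations
 * live in the individual mode files such as scheduler_roundrobin.c.
 */
static __rte_always_inline uint16_t
example_schedule_ordered_enqueue(struct rte_ring *order_ring,
		uint8_t slave_dev_id, uint16_t slave_qp_id,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
			nb_ops);
	uint16_t nb_ops_enqd = rte_cryptodev_enqueue_burst(slave_dev_id,
			slave_qp_id, ops, nb_ops_to_enq);

	/* only ops actually accepted by the slave enter the order ring */
	scheduler_order_insert(order_ring, ops, nb_ops_enqd);

	return nb_ops_enqd;
}
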
/**
 * Peek at the object @pos entries past the consumer head without dequeuing
 * it; the ring's object table starts right after the rte_ring header, i.e.
 * at &order_ring[1].
 */
#define SCHEDULER_GET_RING_OBJ(order_ring, pos, op) do {		\
	struct rte_crypto_op **ring = (void *)&order_ring[1];		\
	op = ring[(order_ring->cons.head + pos) & order_ring->mask];	\
} while (0)

/**
 * Dequeue up to nb_ops crypto ops from the order ring in their original
 * enqueue order, returning only the leading run of completed ops: the scan
 * stops at the first op still marked RTE_CRYPTO_OP_STATUS_NOT_PROCESSED.
 */
static __rte_always_inline uint16_t
scheduler_order_drain(struct rte_ring *order_ring,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rte_crypto_op *op;
	uint32_t nb_objs = rte_ring_count(order_ring);
	uint32_t nb_ops_to_deq = 0;
	uint32_t nb_ops_deqd = 0;

	if (nb_objs > nb_ops)
		nb_objs = nb_ops;

	while (nb_ops_to_deq < nb_objs) {
		SCHEDULER_GET_RING_OBJ(order_ring, nb_ops_to_deq, op);
		if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
			break;
		nb_ops_to_deq++;
	}

	if (nb_ops_to_deq)
		nb_ops_deqd = rte_ring_sc_dequeue_bulk(order_ring,
				(void **)ops, nb_ops_to_deq, NULL);

	return nb_ops_deqd;
}
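
/*
 * Companion sketch (again hypothetical, mirroring the pattern used by the
 * mode files): the ordered dequeue path polls a slave, which updates the
 * status of ops already sitting in the order ring, then drains the
 * completed in-order prefix back to the caller.
 */
static __rte_always_inline uint16_t
example_schedule_ordered_dequeue(struct rte_ring *order_ring,
		uint8_t slave_dev_id, uint16_t slave_qp_id,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	/* the ops returned here were inserted into the order ring at
	 * enqueue time; this burst only advances their status fields */
	rte_cryptodev_dequeue_burst(slave_dev_id, slave_qp_id, ops, nb_ops);

	return scheduler_order_drain(order_ring, ops, nb_ops);
}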

/** Device-specific operations function pointer structure */
extern struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops;

#endif /* _SCHEDULER_PMD_PRIVATE_H */