/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _SCHEDULER_PMD_PRIVATE_H
#define _SCHEDULER_PMD_PRIVATE_H

#include <rte_ring.h>	/* the inline helpers below use the rte_ring API */

#include "rte_cryptodev_scheduler.h"

#define PER_SLAVE_BUFF_SIZE			(256)

#define CS_LOG_ERR(fmt, args...)					\
	RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n",		\
		RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),			\
		__func__, __LINE__, ## args)

#ifdef RTE_LIBRTE_CRYPTO_SCHEDULER_DEBUG
#define CS_LOG_INFO(fmt, args...)					\
	RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n",	\
		RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),			\
		__func__, __LINE__, ## args)

#define CS_LOG_DBG(fmt, args...)					\
	RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n",	\
		RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),			\
		__func__, __LINE__, ## args)
#else
#define CS_LOG_INFO(fmt, args...)
#define CS_LOG_DBG(fmt, args...)
#endif

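/** State of a single slave: which device and queue pair it is, plus the
 *  number of crypto ops currently in flight on that queue pair.
 */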
struct scheduler_slave {
	uint8_t dev_id;
	uint16_t qp_id;
	uint32_t nb_inflight_cops;

	enum rte_cryptodev_type dev_type;
};

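/** Device-level scheduler context, shared by all queue pairs of one
 *  scheduler instance: the attached slaves, the active scheduling mode and
 *  its ops, the capabilities advertised to the application, and the slave
 *  names captured from the initialization parameters.
 */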
struct scheduler_ctx {
	void *private_ctx;
	/**< private scheduler context pointer */

	struct rte_cryptodev_capabilities *capabilities;
	uint32_t nb_capabilities;

	uint32_t max_nb_queue_pairs;

	struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
	uint32_t nb_slaves;

	enum rte_cryptodev_scheduler_mode mode;

	struct rte_cryptodev_scheduler_ops ops;

	uint8_t reordering_enabled;

	char name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
	char description[RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN];

	char *init_slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
	int nb_init_slaves;
} __rte_cache_aligned;

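/** Per-queue-pair context: the mode's private queue-pair data plus the
 *  order ring used to restore the original op order when reordering is
 *  enabled.
 */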
struct scheduler_qp_ctx {
	void *private_qp_ctx;

	uint32_t max_nb_objs;

	struct rte_ring *order_ring;
	uint32_t seqn;
} __rte_cache_aligned;

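/** A scheduler session holds one slave session per attached slave, indexed
 *  to match scheduler_ctx.slaves[].
 */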
struct scheduler_session {
	struct rte_cryptodev_sym_session *sessions[
			RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
};

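/** Clamp nb_ops to the free space left in the order ring, so that an
 *  ordered enqueue never accepts more ops than the ring can track.
 */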
static __rte_always_inline uint16_t
get_max_enqueue_order_count(struct rte_ring *order_ring, uint16_t nb_ops)
{
	uint32_t count = rte_ring_free_count(order_ring);

	return count > nb_ops ? nb_ops : count;
}

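/** Record a freshly enqueued burst in the order ring (single producer), in
 *  the order the ops arrived from the application.
 */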
static __rte_always_inline void
scheduler_order_insert(struct rte_ring *order_ring,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	rte_ring_sp_enqueue_burst(order_ring, (void **)ops, nb_ops, NULL);
}

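/* Peek at the op sitting `pos` slots past the consumer head without
 * dequeuing it; relies on the ring's object table being laid out directly
 * after the struct rte_ring header.
 */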
#define SCHEDULER_GET_RING_OBJ(order_ring, pos, op) do {		\
	struct rte_crypto_op **ring = (void *)&order_ring[1];		\
	op = ring[(order_ring->cons.head + pos) & order_ring->mask];	\
} while (0)

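/** Dequeue, in original order, the leading run of ops the slaves have
 *  finished with, stopping at the first op still marked NOT_PROCESSED.
 *  Returns the number of ops written to ops[].
 */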
static __rte_always_inline uint16_t
scheduler_order_drain(struct rte_ring *order_ring,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rte_crypto_op *op;
	uint32_t nb_objs = rte_ring_count(order_ring);
	uint32_t nb_ops_to_deq = 0;
	uint32_t nb_ops_deqd = 0;

	if (nb_objs > nb_ops)
		nb_objs = nb_ops;

	while (nb_ops_to_deq < nb_objs) {
		SCHEDULER_GET_RING_OBJ(order_ring, nb_ops_to_deq, op);
		if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
			break;
		nb_ops_to_deq++;
	}

	if (nb_ops_to_deq)
		nb_ops_deqd = rte_ring_sc_dequeue_bulk(order_ring,
				(void **)ops, nb_ops_to_deq, NULL);

	return nb_ops_deqd;
}

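/*
 * Illustrative sketch of how the ordering helpers above are typically
 * combined by a scheduler mode's burst functions. Function names here are
 * hypothetical; the real modes live in scheduler_roundrobin.c and the
 * other scheduler_*.c files.
 *
 *	static uint16_t
 *	example_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
 *			uint16_t nb_ops)
 *	{
 *		struct scheduler_qp_ctx *qp_ctx = qp;
 *		struct rte_ring *ring = qp_ctx->order_ring;
 *		uint16_t nb_to_enq = get_max_enqueue_order_count(ring, nb_ops);
 *
 *		// ... hand nb_to_enq ops to a slave queue pair here ...
 *
 *		scheduler_order_insert(ring, ops, nb_to_enq);
 *		return nb_to_enq;
 *	}
 *
 *	static uint16_t
 *	example_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
 *			uint16_t nb_ops)
 *	{
 *		struct scheduler_qp_ctx *qp_ctx = qp;
 *
 *		// ... collect processed ops back from the slaves first ...
 *
 *		return scheduler_order_drain(qp_ctx->order_ring, ops, nb_ops);
 *	}
 */
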
/** device specific operations function pointer structure */
extern struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops;

#endif /* _SCHEDULER_PMD_PRIVATE_H */
157