1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2017 Intel Corporation
3 */
4
5 #include <string.h>
6
7 #include <rte_common.h>
8 #include <rte_malloc.h>
9 #include <cryptodev_pmd.h>
10
11 #include "null_crypto_pmd_private.h"
12
/* Capabilities advertised to applications via dev_infos_get().
 * The NULL algorithms perform no transform, so all key/digest sizes
 * are zero and the block size is 1 byte (any length is accepted).
 */
static const struct rte_cryptodev_capabilities null_crypto_pmd_capabilities[] = {
	{ /* NULL (AUTH) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_NULL,
				.block_size = 1,
				/* no key material is used */
				.key_size = {
					.min = 0,
					.max = 0,
					.increment = 0
				},
				/* no digest is produced */
				.digest_size = {
					.min = 0,
					.max = 0,
					.increment = 0
				},
				.iv_size = { 0 }
			}, },
		}, },
	},
	{ /* NULL (CIPHER) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_NULL,
				.block_size = 1,
				/* no key material is used */
				.key_size = {
					.min = 0,
					.max = 0,
					.increment = 0
				},
				.iv_size = { 0 }
			}, },
		}, }
	},
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
53
/** Configure device.
 *
 * The null PMD keeps no per-device configuration state, so this is a
 * no-op that always reports success.
 */
static int
null_crypto_pmd_config(__rte_unused struct rte_cryptodev *dev,
		__rte_unused struct rte_cryptodev_config *config)
{
	return 0;
}
61
/** Start device.
 *
 * Nothing to do for the null PMD; always succeeds.
 */
static int
null_crypto_pmd_start(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}
68
/** Stop device.
 *
 * Nothing to do for the null PMD.
 */
static void
null_crypto_pmd_stop(__rte_unused struct rte_cryptodev *dev)
{
}
74
/** Close device.
 *
 * Queue pairs are released separately via queue_pair_release, so there
 * is nothing to tear down here; always succeeds.
 */
static int
null_crypto_pmd_close(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}
81
82 /** Get device statistics */
83 static void
null_crypto_pmd_stats_get(struct rte_cryptodev * dev,struct rte_cryptodev_stats * stats)84 null_crypto_pmd_stats_get(struct rte_cryptodev *dev,
85 struct rte_cryptodev_stats *stats)
86 {
87 int qp_id;
88
89 for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
90 struct null_crypto_qp *qp = dev->data->queue_pairs[qp_id];
91
92 stats->enqueued_count += qp->qp_stats.enqueued_count;
93 stats->dequeued_count += qp->qp_stats.dequeued_count;
94
95 stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
96 stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
97 }
98 }
99
100 /** Reset device statistics */
101 static void
null_crypto_pmd_stats_reset(struct rte_cryptodev * dev)102 null_crypto_pmd_stats_reset(struct rte_cryptodev *dev)
103 {
104 int qp_id;
105
106 for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
107 struct null_crypto_qp *qp = dev->data->queue_pairs[qp_id];
108
109 memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
110 }
111 }
112
113
114 /** Get device info */
115 static void
null_crypto_pmd_info_get(struct rte_cryptodev * dev,struct rte_cryptodev_info * dev_info)116 null_crypto_pmd_info_get(struct rte_cryptodev *dev,
117 struct rte_cryptodev_info *dev_info)
118 {
119 struct null_crypto_private *internals = dev->data->dev_private;
120
121 if (dev_info != NULL) {
122 dev_info->driver_id = dev->driver_id;
123 dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
124 /* No limit of number of sessions */
125 dev_info->sym.max_nb_sessions = 0;
126 dev_info->feature_flags = dev->feature_flags;
127 dev_info->capabilities = null_crypto_pmd_capabilities;
128 }
129 }
130
131 /** Release queue pair */
132 static int
null_crypto_pmd_qp_release(struct rte_cryptodev * dev,uint16_t qp_id)133 null_crypto_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
134 {
135 if (dev->data->queue_pairs[qp_id] != NULL) {
136 struct null_crypto_qp *qp = dev->data->queue_pairs[qp_id];
137
138 rte_ring_free(qp->processed_pkts);
139
140 rte_free(dev->data->queue_pairs[qp_id]);
141 dev->data->queue_pairs[qp_id] = NULL;
142 }
143 return 0;
144 }
145
146 /** set a unique name for the queue pair based on it's name, dev_id and qp_id */
147 static int
null_crypto_pmd_qp_set_unique_name(struct rte_cryptodev * dev,struct null_crypto_qp * qp)148 null_crypto_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
149 struct null_crypto_qp *qp)
150 {
151 unsigned n = snprintf(qp->name, sizeof(qp->name),
152 "null_crypto_pmd_%u_qp_%u",
153 dev->data->dev_id, qp->id);
154
155 if (n >= sizeof(qp->name))
156 return -1;
157
158 return 0;
159 }
160
161 /** Create a ring to place process packets on */
162 static struct rte_ring *
null_crypto_pmd_qp_create_processed_pkts_ring(struct null_crypto_qp * qp,unsigned ring_size,int socket_id)163 null_crypto_pmd_qp_create_processed_pkts_ring(struct null_crypto_qp *qp,
164 unsigned ring_size, int socket_id)
165 {
166 struct rte_ring *r;
167
168 r = rte_ring_lookup(qp->name);
169 if (r) {
170 if (rte_ring_get_size(r) >= ring_size) {
171 NULL_LOG(INFO,
172 "Reusing existing ring %s for "
173 " processed packets", qp->name);
174 return r;
175 }
176
177 NULL_LOG(INFO,
178 "Unable to reuse existing ring %s for "
179 " processed packets", qp->name);
180 return NULL;
181 }
182
183 return rte_ring_create(qp->name, ring_size, socket_id,
184 RING_F_SP_ENQ | RING_F_SC_DEQ);
185 }
186
187 /** Setup a queue pair */
188 static int
null_crypto_pmd_qp_setup(struct rte_cryptodev * dev,uint16_t qp_id,const struct rte_cryptodev_qp_conf * qp_conf,int socket_id)189 null_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
190 const struct rte_cryptodev_qp_conf *qp_conf,
191 int socket_id)
192 {
193 struct null_crypto_private *internals = dev->data->dev_private;
194 struct null_crypto_qp *qp;
195 int retval;
196
197 if (qp_id >= internals->max_nb_qpairs) {
198 NULL_LOG(ERR, "Invalid qp_id %u, greater than maximum "
199 "number of queue pairs supported (%u).",
200 qp_id, internals->max_nb_qpairs);
201 return (-EINVAL);
202 }
203
204 /* Free memory prior to re-allocation if needed. */
205 if (dev->data->queue_pairs[qp_id] != NULL)
206 null_crypto_pmd_qp_release(dev, qp_id);
207
208 /* Allocate the queue pair data structure. */
209 qp = rte_zmalloc_socket("Null Crypto PMD Queue Pair", sizeof(*qp),
210 RTE_CACHE_LINE_SIZE, socket_id);
211 if (qp == NULL) {
212 NULL_LOG(ERR, "Failed to allocate queue pair memory");
213 return (-ENOMEM);
214 }
215
216 qp->id = qp_id;
217 dev->data->queue_pairs[qp_id] = qp;
218
219 retval = null_crypto_pmd_qp_set_unique_name(dev, qp);
220 if (retval) {
221 NULL_LOG(ERR, "Failed to create unique name for null "
222 "crypto device");
223
224 goto qp_setup_cleanup;
225 }
226
227 qp->processed_pkts = null_crypto_pmd_qp_create_processed_pkts_ring(qp,
228 qp_conf->nb_descriptors, socket_id);
229 if (qp->processed_pkts == NULL) {
230 NULL_LOG(ERR, "Failed to create unique name for null "
231 "crypto device");
232 goto qp_setup_cleanup;
233 }
234
235 qp->sess_mp = qp_conf->mp_session;
236
237 memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
238
239 return 0;
240
241 qp_setup_cleanup:
242 rte_free(qp);
243
244 return -1;
245 }
246
/** Returns the size of the NULL crypto session structure.
 *
 * Used by the cryptodev layer to size the session private-data area.
 */
static unsigned
null_crypto_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct null_crypto_session);
}
253
254 /** Configure a null crypto session from a crypto xform chain */
255 static int
null_crypto_pmd_sym_session_configure(struct rte_cryptodev * dev __rte_unused,struct rte_crypto_sym_xform * xform,struct rte_cryptodev_sym_session * sess)256 null_crypto_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
257 struct rte_crypto_sym_xform *xform,
258 struct rte_cryptodev_sym_session *sess)
259 {
260 void *sess_private_data;
261 int ret;
262
263 if (unlikely(sess == NULL)) {
264 NULL_LOG(ERR, "invalid session struct");
265 return -EINVAL;
266 }
267
268 sess_private_data = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
269
270 ret = null_crypto_set_session_parameters(sess_private_data, xform);
271 if (ret != 0) {
272 NULL_LOG(ERR, "failed configure session parameters");
273 return ret;
274 }
275
276 return 0;
277 }
278
/** Clear the memory of session so it doesn't leave key material behind.
 *
 * The NULL algorithms hold no key material, so there is nothing to
 * scrub; intentionally a no-op.
 */
static void
null_crypto_pmd_sym_session_clear(struct rte_cryptodev *dev __rte_unused,
		struct rte_cryptodev_sym_session *sess __rte_unused)
{}
284
/* Dispatch table wiring the cryptodev framework entry points to this
 * PMD's implementations. Unset members default to NULL (unsupported).
 */
static struct rte_cryptodev_ops pmd_ops = {
		.dev_configure		= null_crypto_pmd_config,
		.dev_start		= null_crypto_pmd_start,
		.dev_stop		= null_crypto_pmd_stop,
		.dev_close		= null_crypto_pmd_close,

		.stats_get		= null_crypto_pmd_stats_get,
		.stats_reset		= null_crypto_pmd_stats_reset,

		.dev_infos_get		= null_crypto_pmd_info_get,

		.queue_pair_setup	= null_crypto_pmd_qp_setup,
		.queue_pair_release	= null_crypto_pmd_qp_release,

		.sym_session_get_size	= null_crypto_pmd_sym_session_get_size,
		.sym_session_configure	= null_crypto_pmd_sym_session_configure,
		.sym_session_clear	= null_crypto_pmd_sym_session_clear
};

/* Exported handle registered with the cryptodev framework at probe time. */
struct rte_cryptodev_ops *null_crypto_pmd_ops = &pmd_ops;
305