/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <errno.h>
#include <stdlib.h>
#include <sys/queue.h>

#include <dev_driver.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>

#include "cryptodev_pmd.h"

/**
 * Parse name from argument
 */
static int
rte_cryptodev_pmd_parse_name_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	struct rte_cryptodev_pmd_init_params *params = extra_args;
	int n;

	if (value == NULL || extra_args == NULL)
		return -EINVAL;

	n = strlcpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);
	if (n >= RTE_CRYPTODEV_NAME_MAX_LEN)
		return -EINVAL;

	return 0;
}

/**
 * Parse unsigned integer from argument
 */
static int
rte_cryptodev_pmd_parse_uint_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	/* parse into a wide type so out-of-range values are rejected
	 * rather than silently truncated */
	int64_t i;
	char *end;

	if (value == NULL || extra_args == NULL)
		return -EINVAL;

	errno = 0;

	i = strtol(value, &end, 10);
	if (*end != 0 || errno != 0 || i < 0 || i > UINT32_MAX)
		return -EINVAL;

	*((uint32_t *)extra_args) = i;
	return 0;
}

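/**
 * Parse a devargs string into PMD initialisation parameters.
 *
 * Values for the RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
 * RTE_CRYPTODEV_PMD_SOCKET_ID_ARG and RTE_CRYPTODEV_PMD_NAME_ARG keys
 * are written into @p params; a NULL @p args leaves it unchanged.
 */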
int
rte_cryptodev_pmd_parse_input_args(
		struct rte_cryptodev_pmd_init_params *params,
		const char *args)
{
	struct rte_kvargs *kvlist = NULL;
	int ret = 0;

	if (params == NULL)
		return -EINVAL;

	if (args) {
		kvlist = rte_kvargs_parse(args, cryptodev_pmd_valid_params);
		if (kvlist == NULL)
			return -EINVAL;

		ret = rte_kvargs_process(kvlist,
				RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
				&rte_cryptodev_pmd_parse_uint_arg,
				&params->max_nb_queue_pairs);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist,
				RTE_CRYPTODEV_PMD_SOCKET_ID_ARG,
				&rte_cryptodev_pmd_parse_uint_arg,
				&params->socket_id);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist,
				RTE_CRYPTODEV_PMD_NAME_ARG,
				&rte_cryptodev_pmd_parse_name_arg,
				params);
		if (ret < 0)
			goto free_kvlist;
	}

free_kvlist:
	rte_kvargs_free(kvlist);
	return ret;
}

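/**
 * Allocate a new cryptodev on the requested socket and, in the primary
 * process, its driver-private data area. The name from @p params, when
 * set, overrides the @p name argument.
 *
 * A minimal sketch of how a vdev PMD's probe routine might combine
 * these helpers; the driver name "crypto_example", struct example_priv
 * and the defaults are illustrative, not part of this library:
 *
 *	static int
 *	cryptodev_example_probe(struct rte_vdev_device *vdev)
 *	{
 *		struct rte_cryptodev_pmd_init_params init_params = {
 *			.name = "",
 *			.private_data_size = sizeof(struct example_priv),
 *			.socket_id = rte_socket_id(),
 *			.max_nb_queue_pairs = 8
 *		};
 *		const char *name = rte_vdev_device_name(vdev);
 *		struct rte_cryptodev *dev;
 *
 *		if (name == NULL)
 *			return -EINVAL;
 *		if (rte_cryptodev_pmd_parse_input_args(&init_params,
 *				rte_vdev_device_args(vdev)) < 0)
 *			return -EINVAL;
 *
 *		dev = rte_cryptodev_pmd_create(name, &vdev->device,
 *				&init_params);
 *		if (dev == NULL)
 *			return -ENODEV;
 *
 *		... set dev->dev_ops and the burst functions here ...
 *
 *		rte_cryptodev_pmd_probing_finish(dev);
 *		return 0;
 *	}
 */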
struct rte_cryptodev *
rte_cryptodev_pmd_create(const char *name,
		struct rte_device *device,
		struct rte_cryptodev_pmd_init_params *params)
{
	struct rte_cryptodev *cryptodev;

	if (params->name[0] != '\0') {
		CDEV_LOG_INFO("User specified device name = %s", params->name);
		name = params->name;
	}

	CDEV_LOG_INFO("Creating cryptodev %s", name);

	CDEV_LOG_INFO("Initialisation parameters - name: %s, "
			"socket id: %d, max queue pairs: %u",
			name, params->socket_id, params->max_nb_queue_pairs);

	/* allocate device structure */
	cryptodev = rte_cryptodev_pmd_allocate(name, params->socket_id);
	if (cryptodev == NULL) {
		CDEV_LOG_ERR("Failed to allocate crypto device for %s", name);
		return NULL;
	}

	/* allocate private device structure */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private =
				rte_zmalloc_socket("cryptodev device private",
						params->private_data_size,
						RTE_CACHE_LINE_SIZE,
						params->socket_id);

		if (cryptodev->data->dev_private == NULL) {
			CDEV_LOG_ERR("Cannot allocate memory for cryptodev %s"
					" private data", name);

			rte_cryptodev_pmd_release_device(cryptodev);
			return NULL;
		}
	}

	cryptodev->device = device;

	/* initialise user call-back tail queue */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	return cryptodev;
}

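/**
 * Tear down a cryptodev created by rte_cryptodev_pmd_create(): release
 * the device slot and, in the primary process, free its private data.
 */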
int
rte_cryptodev_pmd_destroy(struct rte_cryptodev *cryptodev)
{
	int retval;
	void *dev_priv = cryptodev->data->dev_private;

	CDEV_LOG_INFO("Closing crypto device %s", cryptodev->device->name);

	/* free crypto device */
	retval = rte_cryptodev_pmd_release_device(cryptodev);
	if (retval)
		return retval;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(dev_priv);

	cryptodev->device = NULL;
	cryptodev->data = NULL;

	return 0;
}

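/**
 * Final step of device probing. In a secondary process this publishes
 * the now-usable device's fast-path function pointers; in the primary
 * process the fast-path ops are installed later, at device start.
 */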
void
rte_cryptodev_pmd_probing_finish(struct rte_cryptodev *cryptodev)
{
	if (cryptodev == NULL)
		return;
	/*
	 * In a secondary process the device is expected to be already
	 * usable at this point, i.e. its shared data and all fast-path
	 * function pointers are set up inside rte_cryptodev, so publish
	 * them to the fast-path ops table.
	 */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		cryptodev_fp_ops_set(rte_crypto_fp_ops +
				cryptodev->data->dev_id, cryptodev);
}

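/*
 * Placeholder burst functions installed by cryptodev_fp_ops_reset():
 * any enqueue/dequeue on an unconfigured device logs an error, sets
 * rte_errno to ENOTSUP and processes zero operations.
 */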
static uint16_t
dummy_crypto_enqueue_burst(__rte_unused void *qp,
		__rte_unused struct rte_crypto_op **ops,
		__rte_unused uint16_t nb_ops)
{
	CDEV_LOG_ERR(
		"crypto enqueue burst requested for unconfigured device");
	rte_errno = ENOTSUP;
	return 0;
}

static uint16_t
dummy_crypto_dequeue_burst(__rte_unused void *qp,
		__rte_unused struct rte_crypto_op **ops,
		__rte_unused uint16_t nb_ops)
{
	CDEV_LOG_ERR(
		"crypto dequeue burst requested for unconfigured device");
	rte_errno = ENOTSUP;
	return 0;
}

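/**
 * Point a fast-path ops entry at the dummy handlers above so that
 * calls on an unconfigured or released device fail safely.
 */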
void
cryptodev_fp_ops_reset(struct rte_crypto_fp_ops *fp_ops)
{
	static struct rte_cryptodev_cb_rcu dummy_cb[RTE_MAX_QUEUES_PER_PORT];
	static void *dummy_data[RTE_MAX_QUEUES_PER_PORT];
	static const struct rte_crypto_fp_ops dummy = {
		.enqueue_burst = dummy_crypto_enqueue_burst,
		.dequeue_burst = dummy_crypto_dequeue_burst,
		.qp = {
			.data = dummy_data,
			.enq_cb = dummy_cb,
			.deq_cb = dummy_cb,
		},
	};

	*fp_ops = dummy;
}

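/**
 * Publish a configured device's real burst functions, queue-pair
 * array and callback lists into its fast-path ops entry.
 */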
void
cryptodev_fp_ops_set(struct rte_crypto_fp_ops *fp_ops,
		const struct rte_cryptodev *dev)
{
	fp_ops->enqueue_burst = dev->enqueue_burst;
	fp_ops->dequeue_burst = dev->dequeue_burst;
	fp_ops->qp.data = dev->data->queue_pairs;
	fp_ops->qp.enq_cb = dev->enq_cbs;
	fp_ops->qp.deq_cb = dev->deq_cbs;
	fp_ops->qp_depth_used = dev->qp_depth_used;
}

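/**
 * Return the event metadata attached to @p op's session, or, for a
 * session-less op, stored at its private data offset; NULL when none.
 */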
void *
rte_cryptodev_session_event_mdata_get(struct rte_crypto_op *op)
{
	if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
			op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		return rte_cryptodev_sym_session_get_user_data(op->sym->session);
	else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC &&
			op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		return op->asym->session->event_mdata;
	else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
			op->private_data_offset)
		return ((uint8_t *)op + op->private_data_offset);
	else
		return NULL;
}