xref: /dpdk/lib/cryptodev/cryptodev_pmd.c (revision 8c76e2f6937730782baa210bf456bd19da2a9600)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Intel Corporation
3  */
4 
#include <stdint.h>
#include <stdlib.h>
#include <sys/queue.h>

#include <dev_driver.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_string_fns.h>

#include "cryptodev_pmd.h"
14 
15 /**
16  * Parse name from argument
17  */
18 static int
19 rte_cryptodev_pmd_parse_name_arg(const char *key __rte_unused,
20 		const char *value, void *extra_args)
21 {
22 	struct rte_cryptodev_pmd_init_params *params = extra_args;
23 	int n;
24 
25 	n = strlcpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);
26 	if (n >= RTE_CRYPTODEV_NAME_MAX_LEN)
27 		return -EINVAL;
28 
29 	return 0;
30 }
31 
32 /**
33  * Parse unsigned integer from argument
34  */
35 static int
36 rte_cryptodev_pmd_parse_uint_arg(const char *key __rte_unused,
37 		const char *value, void *extra_args)
38 {
39 	int i;
40 	char *end;
41 	errno = 0;
42 
43 	i = strtol(value, &end, 10);
44 	if (*end != 0 || errno != 0 || i < 0)
45 		return -EINVAL;
46 
47 	*((uint32_t *)extra_args) = i;
48 	return 0;
49 }
50 
51 int
52 rte_cryptodev_pmd_parse_input_args(
53 		struct rte_cryptodev_pmd_init_params *params,
54 		const char *args)
55 {
56 	struct rte_kvargs *kvlist = NULL;
57 	int ret = 0;
58 
59 	if (params == NULL)
60 		return -EINVAL;
61 
62 	if (args) {
63 		kvlist = rte_kvargs_parse(args,	cryptodev_pmd_valid_params);
64 		if (kvlist == NULL)
65 			return -EINVAL;
66 
67 		ret = rte_kvargs_process(kvlist,
68 				RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
69 				&rte_cryptodev_pmd_parse_uint_arg,
70 				&params->max_nb_queue_pairs);
71 		if (ret < 0)
72 			goto free_kvlist;
73 
74 		ret = rte_kvargs_process(kvlist,
75 				RTE_CRYPTODEV_PMD_SOCKET_ID_ARG,
76 				&rte_cryptodev_pmd_parse_uint_arg,
77 				&params->socket_id);
78 		if (ret < 0)
79 			goto free_kvlist;
80 
81 		ret = rte_kvargs_process(kvlist,
82 				RTE_CRYPTODEV_PMD_NAME_ARG,
83 				&rte_cryptodev_pmd_parse_name_arg,
84 				params);
85 		if (ret < 0)
86 			goto free_kvlist;
87 	}
88 
89 free_kvlist:
90 	rte_kvargs_free(kvlist);
91 	return ret;
92 }
93 
94 struct rte_cryptodev *
95 rte_cryptodev_pmd_create(const char *name,
96 		struct rte_device *device,
97 		struct rte_cryptodev_pmd_init_params *params)
98 {
99 	struct rte_cryptodev *cryptodev;
100 
101 	if (params->name[0] != '\0') {
102 		CDEV_LOG_INFO("User specified device name = %s\n", params->name);
103 		name = params->name;
104 	}
105 
106 	CDEV_LOG_INFO("Creating cryptodev %s\n", name);
107 
108 	CDEV_LOG_INFO("Initialisation parameters - name: %s,"
109 			"socket id: %d, max queue pairs: %u",
110 			name, params->socket_id, params->max_nb_queue_pairs);
111 
112 	/* allocate device structure */
113 	cryptodev = rte_cryptodev_pmd_allocate(name, params->socket_id);
114 	if (cryptodev == NULL) {
115 		CDEV_LOG_ERR("Failed to allocate crypto device for %s", name);
116 		return NULL;
117 	}
118 
119 	/* allocate private device structure */
120 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
121 		cryptodev->data->dev_private =
122 				rte_zmalloc_socket("cryptodev device private",
123 						params->private_data_size,
124 						RTE_CACHE_LINE_SIZE,
125 						params->socket_id);
126 
127 		if (cryptodev->data->dev_private == NULL) {
128 			CDEV_LOG_ERR("Cannot allocate memory for cryptodev %s"
129 					" private data", name);
130 
131 			rte_cryptodev_pmd_release_device(cryptodev);
132 			return NULL;
133 		}
134 	}
135 
136 	cryptodev->device = device;
137 
138 	/* initialise user call-back tail queue */
139 	TAILQ_INIT(&(cryptodev->link_intr_cbs));
140 
141 	return cryptodev;
142 }
143 
144 int
145 rte_cryptodev_pmd_destroy(struct rte_cryptodev *cryptodev)
146 {
147 	int retval;
148 	void *dev_priv = cryptodev->data->dev_private;
149 
150 	CDEV_LOG_INFO("Closing crypto device %s", cryptodev->device->name);
151 
152 	/* free crypto device */
153 	retval = rte_cryptodev_pmd_release_device(cryptodev);
154 	if (retval)
155 		return retval;
156 
157 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
158 		rte_free(dev_priv);
159 
160 
161 	cryptodev->device = NULL;
162 	cryptodev->data = NULL;
163 
164 	return 0;
165 }
166 
167 void
168 rte_cryptodev_pmd_probing_finish(struct rte_cryptodev *cryptodev)
169 {
170 	if (cryptodev == NULL)
171 		return;
172 	/*
173 	 * for secondary process, at that point we expect device
174 	 * to be already 'usable', so shared data and all function
175 	 * pointers for fast-path devops have to be setup properly
176 	 * inside rte_cryptodev.
177 	 */
178 	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
179 		cryptodev_fp_ops_set(rte_crypto_fp_ops +
180 				cryptodev->data->dev_id, cryptodev);
181 }
182 
183 static uint16_t
184 dummy_crypto_enqueue_burst(__rte_unused void *qp,
185 			   __rte_unused struct rte_crypto_op **ops,
186 			   __rte_unused uint16_t nb_ops)
187 {
188 	CDEV_LOG_ERR(
189 		"crypto enqueue burst requested for unconfigured device");
190 	rte_errno = ENOTSUP;
191 	return 0;
192 }
193 
194 static uint16_t
195 dummy_crypto_dequeue_burst(__rte_unused void *qp,
196 			   __rte_unused struct rte_crypto_op **ops,
197 			   __rte_unused uint16_t nb_ops)
198 {
199 	CDEV_LOG_ERR(
200 		"crypto dequeue burst requested for unconfigured device");
201 	rte_errno = ENOTSUP;
202 	return 0;
203 }
204 
205 void
206 cryptodev_fp_ops_reset(struct rte_crypto_fp_ops *fp_ops)
207 {
208 	static struct rte_cryptodev_cb_rcu dummy_cb[RTE_MAX_QUEUES_PER_PORT];
209 	static void *dummy_data[RTE_MAX_QUEUES_PER_PORT];
210 	static const struct rte_crypto_fp_ops dummy = {
211 		.enqueue_burst = dummy_crypto_enqueue_burst,
212 		.dequeue_burst = dummy_crypto_dequeue_burst,
213 		.qp = {
214 			.data = dummy_data,
215 			.enq_cb = dummy_cb,
216 			.deq_cb = dummy_cb,
217 		},
218 	};
219 
220 	*fp_ops = dummy;
221 }
222 
223 void
224 cryptodev_fp_ops_set(struct rte_crypto_fp_ops *fp_ops,
225 		     const struct rte_cryptodev *dev)
226 {
227 	fp_ops->enqueue_burst = dev->enqueue_burst;
228 	fp_ops->dequeue_burst = dev->dequeue_burst;
229 	fp_ops->qp.data = dev->data->queue_pairs;
230 	fp_ops->qp.enq_cb = dev->enq_cbs;
231 	fp_ops->qp.deq_cb = dev->deq_cbs;
232 }
233 
234 void *
235 rte_cryptodev_session_event_mdata_get(struct rte_crypto_op *op)
236 {
237 	if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
238 			op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
239 		return rte_cryptodev_sym_session_get_user_data(op->sym->session);
240 	else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC &&
241 			op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
242 		return op->asym->session->event_mdata;
243 	else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
244 			op->private_data_offset)
245 		return ((uint8_t *)op + op->private_data_offset);
246 	else
247 		return NULL;
248 }
249