xref: /dpdk/drivers/crypto/ccp/rte_ccp_pmd.c (revision e4373bf1b3f51715bf66e87c0134e2c217e4612c)
/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 */

#include <rte_string_fns.h>
#include <rte_bus_pci.h>
#include <rte_bus_vdev.h>
#include <rte_common.h>
#include <rte_config.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_pci.h>
#include <rte_dev.h>
#include <rte_malloc.h>

#include "ccp_crypto.h"
#include "ccp_dev.h"
#include "ccp_pmd_private.h"

/**
 * Global flag indicating whether the CCP PMD has already been initialized.
 */
static unsigned int ccp_pmd_init_done;
uint8_t ccp_cryptodev_driver_id;
uint8_t cryptodev_cnt;

struct ccp_pmd_init_params {
	struct rte_cryptodev_pmd_init_params def_p;
	bool auth_opt;
};

#define CCP_CRYPTODEV_PARAM_NAME		("name")
#define CCP_CRYPTODEV_PARAM_SOCKET_ID		("socket_id")
#define CCP_CRYPTODEV_PARAM_MAX_NB_QP		("max_nb_queue_pairs")
#define CCP_CRYPTODEV_PARAM_AUTH_OPT		("ccp_auth_opt")

const char *ccp_pmd_valid_params[] = {
	CCP_CRYPTODEV_PARAM_NAME,
	CCP_CRYPTODEV_PARAM_SOCKET_ID,
	CCP_CRYPTODEV_PARAM_MAX_NB_QP,
	CCP_CRYPTODEV_PARAM_AUTH_OPT,
};

/** ccp pmd auth option */
enum ccp_pmd_auth_opt {
	CCP_PMD_AUTH_OPT_CCP = 0,
	CCP_PMD_AUTH_OPT_CPU,
};

/** parse integer argument */
static int
parse_integer_arg(const char *key __rte_unused,
		  const char *value, void *extra_args)
{
	int *i = (int *) extra_args;

	*i = atoi(value);
	if (*i < 0) {
		CCP_LOG_ERR("Argument has to be non-negative.\n");
		return -EINVAL;
	}

	return 0;
}

/** parse name argument */
static int
parse_name_arg(const char *key __rte_unused,
	       const char *value, void *extra_args)
{
	struct rte_cryptodev_pmd_init_params *params = extra_args;

	if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
		CCP_LOG_ERR("Invalid name %s, should be less than "
			    "%u bytes.\n", value,
			    RTE_CRYPTODEV_NAME_MAX_LEN - 1);
		return -EINVAL;
	}

	strlcpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);

	return 0;
}

/** parse authentication operation option */
static int
parse_auth_opt_arg(const char *key __rte_unused,
		   const char *value, void *extra_args)
{
	struct ccp_pmd_init_params *params = extra_args;
	int i;

	i = atoi(value);
	if (i < CCP_PMD_AUTH_OPT_CCP || i > CCP_PMD_AUTH_OPT_CPU) {
		CCP_LOG_ERR("Invalid ccp pmd auth option. "
			    "0->auth on CCP(default), "
			    "1->auth on CPU\n");
		return -EINVAL;
	}
	params->auth_opt = i;
	return 0;
}

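/**
 * Parse the vdev devargs (name, socket_id, max_nb_queue_pairs,
 * ccp_auth_opt) into the PMD init parameters.
 */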
static int
ccp_pmd_parse_input_args(struct ccp_pmd_init_params *params,
			 const char *input_args)
{
	struct rte_kvargs *kvlist = NULL;
	int ret = 0;

	if (params == NULL)
		return -EINVAL;

	if (input_args) {
		kvlist = rte_kvargs_parse(input_args,
					  ccp_pmd_valid_params);
		if (kvlist == NULL)
			return -1;

		ret = rte_kvargs_process(kvlist,
					 CCP_CRYPTODEV_PARAM_MAX_NB_QP,
					 &parse_integer_arg,
					 &params->def_p.max_nb_queue_pairs);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist,
					 CCP_CRYPTODEV_PARAM_SOCKET_ID,
					 &parse_integer_arg,
					 &params->def_p.socket_id);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist,
					 CCP_CRYPTODEV_PARAM_NAME,
					 &parse_name_arg,
					 &params->def_p);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist,
					 CCP_CRYPTODEV_PARAM_AUTH_OPT,
					 &parse_auth_opt_arg,
					 params);
		if (ret < 0)
			goto free_kvlist;
	}

free_kvlist:
	rte_kvargs_free(kvlist);
	return ret;
}

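/**
 * Return the ccp_session for an op: for session-based ops, fetch the
 * driver-private session data; for sessionless ops, allocate session
 * objects from the queue pair mempools and initialise them from the
 * xform chain.
 */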
static struct ccp_session *
get_ccp_session(struct ccp_qp *qp, struct rte_crypto_op *op)
{
	struct ccp_session *sess = NULL;

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
		if (unlikely(op->sym->session == NULL))
			return NULL;

		sess = (struct ccp_session *)
			get_sym_session_private_data(
				op->sym->session,
				ccp_cryptodev_driver_id);
	} else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		void *_sess;
		void *_sess_private_data = NULL;
		struct ccp_private *internals;

		if (rte_mempool_get(qp->sess_mp, &_sess))
			return NULL;
		if (rte_mempool_get(qp->sess_mp_priv,
				(void **)&_sess_private_data))
			return NULL;

		sess = (struct ccp_session *)_sess_private_data;

		internals = (struct ccp_private *)qp->dev->data->dev_private;
		if (unlikely(ccp_set_session_parameters(sess, op->sym->xform,
							internals) != 0)) {
			rte_mempool_put(qp->sess_mp, _sess);
			rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
			sess = NULL;
		}
		op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
		set_sym_session_private_data(op->sym->session,
					 ccp_cryptodev_driver_id,
					 _sess_private_data);
	}

	return sess;
}

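/**
 * Enqueue a burst of crypto ops. The burst is split into batches based on
 * the number of detected CCP devices; each batch is dispatched to a command
 * queue with enough free slots (see ccp_allot_queue()).
 */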
static uint16_t
ccp_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
		      uint16_t nb_ops)
{
	struct ccp_session *sess = NULL;
	struct ccp_qp *qp = queue_pair;
	struct ccp_queue *cmd_q;
	struct rte_cryptodev *dev = qp->dev;
	uint16_t i, enq_cnt = 0, slots_req = 0;
	uint16_t tmp_ops = nb_ops, b_idx, cur_ops = 0;

	if (nb_ops == 0)
		return 0;

	if (unlikely(rte_ring_full(qp->processed_pkts) != 0))
		return 0;
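	/*
	 * Size each batch so the burst is spread over the available CCP
	 * devices: nb_ops / cryptodev_cnt per batch, with the remainder
	 * folded into the batch size.
	 */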
	if (tmp_ops >= cryptodev_cnt)
		cur_ops = nb_ops / cryptodev_cnt + nb_ops % cryptodev_cnt;
	else
		cur_ops = tmp_ops;
	while (tmp_ops) {
		b_idx = nb_ops - tmp_ops;
		slots_req = 0;
		if (cur_ops <= tmp_ops) {
			tmp_ops -= cur_ops;
		} else {
			cur_ops = tmp_ops;
			tmp_ops = 0;
		}
		for (i = 0; i < cur_ops; i++) {
			sess = get_ccp_session(qp, ops[i + b_idx]);
			if (unlikely(sess == NULL) && (i == 0)) {
				qp->qp_stats.enqueue_err_count++;
				return 0;
			} else if (sess == NULL) {
				cur_ops = i;
				break;
			}
			slots_req += ccp_compute_slot_count(sess);
		}

		cmd_q = ccp_allot_queue(dev, slots_req);
		if (unlikely(cmd_q == NULL))
			return 0;
		enq_cnt += process_ops_to_enqueue(qp, ops, cmd_q, cur_ops,
				nb_ops, slots_req, b_idx);
	}

	qp->qp_stats.enqueued_count += enq_cnt;
	return enq_cnt;
}

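/**
 * Dequeue completed crypto ops from the queue pair and release the session
 * objects of sessionless ops back to their mempools.
 */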
static uint16_t
ccp_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct ccp_qp *qp = queue_pair;
	uint16_t nb_dequeued = 0, i, total_nb_ops;

	nb_dequeued = process_ops_to_dequeue(qp, ops, nb_ops, &total_nb_ops);

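	/* Keep calling process_ops_to_dequeue() until all total_nb_ops
	 * reported for this batch have been dequeued.
	 */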
	if (total_nb_ops) {
		while (nb_dequeued != total_nb_ops) {
			nb_dequeued = process_ops_to_dequeue(qp,
					ops, nb_ops, &total_nb_ops);
		}
	}

	/* Free session if a session-less crypto op */
	for (i = 0; i < nb_dequeued; i++)
		if (unlikely(ops[i]->sess_type ==
			     RTE_CRYPTO_OP_SESSIONLESS)) {
			struct ccp_session *sess = (struct ccp_session *)
					get_sym_session_private_data(
						ops[i]->sym->session,
						ccp_cryptodev_driver_id);

			rte_mempool_put(qp->sess_mp_priv, sess);
			rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
			ops[i]->sym->session = NULL;
		}
	qp->qp_stats.dequeued_count += nb_dequeued;

	return nb_dequeued;
}

/*
 * The set of PCI devices this driver supports
 */
static struct rte_pci_id ccp_pci_id[] = {
	{
		RTE_PCI_DEVICE(0x1022, 0x1456), /* AMD CCP-5a */
	},
	{
		RTE_PCI_DEVICE(0x1022, 0x1468), /* AMD CCP-5b */
	},
	{.device_id = 0},
};

/** Remove ccp pmd */
static int
cryptodev_ccp_remove(struct rte_vdev_device *dev)
{
	const char *name;

	ccp_pmd_init_done = 0;
	name = rte_vdev_device_name(dev);
	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Closing ccp device %s on numa socket %u\n",
			name, rte_socket_id());

	return 0;
}

/** Create crypto device */
static int
cryptodev_ccp_create(const char *name,
		     struct rte_vdev_device *vdev,
		     struct ccp_pmd_init_params *init_params)
{
	struct rte_cryptodev *dev;
	struct ccp_private *internals;

	if (init_params->def_p.name[0] == '\0')
		strlcpy(init_params->def_p.name, name,
			sizeof(init_params->def_p.name));

	dev = rte_cryptodev_pmd_create(init_params->def_p.name,
				       &vdev->device,
				       &init_params->def_p);
	if (dev == NULL) {
		CCP_LOG_ERR("failed to create cryptodev vdev");
		goto init_error;
	}

	cryptodev_cnt = ccp_probe_devices(ccp_pci_id);

	if (cryptodev_cnt == 0) {
		CCP_LOG_ERR("failed to detect CCP crypto device");
		goto init_error;
	}

	printf("CCP : Crypto device count = %d\n", cryptodev_cnt);
	dev->driver_id = ccp_cryptodev_driver_id;

	/* register rx/tx burst functions for data path */
	dev->dev_ops = ccp_pmd_ops;
	dev->enqueue_burst = ccp_pmd_enqueue_burst;
	dev->dequeue_burst = ccp_pmd_dequeue_burst;

	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;

	internals = dev->data->dev_private;

	internals->max_nb_qpairs = init_params->def_p.max_nb_queue_pairs;
	internals->auth_opt = init_params->auth_opt;
	internals->crypto_num_dev = cryptodev_cnt;

	return 0;

init_error:
	CCP_LOG_ERR("driver %s: %s() failed",
		    init_params->def_p.name, __func__);
	cryptodev_ccp_remove(vdev);

	return -EFAULT;
}

370 
371 /** Probe ccp pmd */
372 static int
373 cryptodev_ccp_probe(struct rte_vdev_device *vdev)
374 {
375 	int rc = 0;
376 	const char *name;
377 	struct ccp_pmd_init_params init_params = {
378 		.def_p = {
379 			"",
380 			sizeof(struct ccp_private),
381 			rte_socket_id(),
382 			CCP_PMD_MAX_QUEUE_PAIRS
383 		},
384 		.auth_opt = CCP_PMD_AUTH_OPT_CCP,
385 	};
386 	const char *input_args;
387 
388 	if (ccp_pmd_init_done) {
389 		RTE_LOG(INFO, PMD, "CCP PMD already initialized\n");
390 		return -EFAULT;
391 	}
392 	name = rte_vdev_device_name(vdev);
393 	if (name == NULL)
394 		return -EINVAL;
395 
396 	input_args = rte_vdev_device_args(vdev);
397 	ccp_pmd_parse_input_args(&init_params, input_args);
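	/* The number of queue pairs is fixed at CCP_PMD_MAX_QUEUE_PAIRS,
	 * overriding any max_nb_queue_pairs devarg parsed above.
	 */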
	init_params.def_p.max_nb_queue_pairs = CCP_PMD_MAX_QUEUE_PAIRS;

	RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
		init_params.def_p.socket_id);
	RTE_LOG(INFO, PMD, "Max number of queue pairs = %d\n",
		init_params.def_p.max_nb_queue_pairs);
	RTE_LOG(INFO, PMD, "Authentication offload to %s\n",
		((init_params.auth_opt == 0) ? "CCP" : "CPU"));

	rc = cryptodev_ccp_create(name, vdev, &init_params);
	if (rc)
		return rc;
	ccp_pmd_init_done = 1;
	return 0;
}

static struct rte_vdev_driver cryptodev_ccp_pmd_drv = {
	.probe = cryptodev_ccp_probe,
	.remove = cryptodev_ccp_remove
};

static struct cryptodev_driver ccp_crypto_drv;

RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CCP_PMD, cryptodev_ccp_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CCP_PMD,
	"max_nb_queue_pairs=<int> "
	"socket_id=<int> "
	"ccp_auth_opt=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(ccp_crypto_drv, cryptodev_ccp_pmd_drv.driver,
			       ccp_cryptodev_driver_id);