xref: /dpdk/drivers/crypto/ipsec_mb/ipsec_mb_private.c (revision f665790a5dbad7b645ff46f31d65e977324e7bfc)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2021 Intel Corporation
3  */
4 
5 #include <bus_vdev_driver.h>
6 #include <rte_common.h>
7 #include <rte_cryptodev.h>
8 #include <rte_errno.h>
9 
10 #include "ipsec_mb_private.h"
11 
/* Per-lcore IPsec multi-buffer manager; allocated lazily, freed in remove(). */
RTE_DEFINE_PER_LCORE(IMB_MGR *, mb_mgr);

/* Per-PMD-type registry; each IPsec-MB PMD flavour fills in its own slot. */
struct ipsec_mb_internals ipsec_mb_pmds[IPSEC_MB_N_PMD_TYPES];
/* Driver log type id. */
int ipsec_mb_logtype_driver;
/* Vector instruction set selected at device-create time (see ipsec_mb_create). */
enum ipsec_mb_vector_mode vector_mode;
17 
18 /**
19  * Generic burst enqueue, place crypto operations on ingress queue for
20  * processing.
21  *
22  * @param __qp         Queue Pair to process
23  * @param ops          Crypto operations for processing
24  * @param nb_ops       Number of crypto operations for processing
25  *
26  * @return
27  * - Number of crypto operations enqueued
28  */
29 static uint16_t
30 ipsec_mb_enqueue_burst(void *__qp, struct rte_crypto_op **ops,
31 		uint16_t nb_ops)
32 {
33 	struct ipsec_mb_qp *qp = __qp;
34 
35 	unsigned int nb_enqueued;
36 
37 	nb_enqueued = rte_ring_enqueue_burst(qp->ingress_queue,
38 			(void **)ops, nb_ops, NULL);
39 
40 	qp->stats.enqueued_count += nb_enqueued;
41 	qp->stats.enqueue_err_count += nb_ops - nb_enqueued;
42 
43 	return nb_enqueued;
44 }
45 
46 static int
47 ipsec_mb_mp_request_register(void)
48 {
49 	RTE_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
50 	return rte_mp_action_register(IPSEC_MB_MP_MSG,
51 				ipsec_mb_ipc_request);
52 }
53 
/* Unregister the IPsec-MB multi-process IPC handler; primary process only. */
static void
ipsec_mb_mp_request_unregister(void)
{
	RTE_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	rte_mp_action_unregister(IPSEC_MB_MP_MSG);
}
60 
61 int
62 ipsec_mb_create(struct rte_vdev_device *vdev,
63 	enum ipsec_mb_pmd_types pmd_type)
64 {
65 	struct rte_cryptodev *dev;
66 	struct ipsec_mb_dev_private *internals;
67 	struct ipsec_mb_internals *pmd_data = &ipsec_mb_pmds[pmd_type];
68 	struct rte_cryptodev_pmd_init_params init_params = {};
69 	const char *name, *args;
70 	int retval;
71 
72 #if defined(RTE_ARCH_ARM)
73 	if ((pmd_type != IPSEC_MB_PMD_TYPE_SNOW3G) &&
74 		(pmd_type != IPSEC_MB_PMD_TYPE_ZUC))
75 		return -ENOTSUP;
76 #endif
77 
78 #if defined(RTE_ARCH_ARM64)
79 	vector_mode = IPSEC_MB_ARM64;
80 #elif defined(RTE_ARCH_X86_64)
81 	if (vector_mode == IPSEC_MB_NOT_SUPPORTED) {
82 		/* Check CPU for supported vector instruction set */
83 		if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
84 			vector_mode = IPSEC_MB_AVX512;
85 		else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
86 			vector_mode = IPSEC_MB_AVX2;
87 		else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
88 			vector_mode = IPSEC_MB_AVX;
89 		else
90 			vector_mode = IPSEC_MB_SSE;
91 	}
92 #else
93 	/* Unsupported architecture */
94 	return -ENOTSUP;
95 #endif
96 
97 	init_params.private_data_size = sizeof(struct ipsec_mb_dev_private) +
98 		pmd_data->internals_priv_size;
99 	init_params.max_nb_queue_pairs =
100 		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS;
101 	init_params.socket_id = rte_socket_id();
102 
103 	name = rte_vdev_device_name(vdev);
104 	if (name == NULL)
105 		return -EINVAL;
106 
107 	args = rte_vdev_device_args(vdev);
108 
109 	retval = rte_cryptodev_pmd_parse_input_args(&init_params, args);
110 	if (retval) {
111 		IPSEC_MB_LOG(
112 		    ERR, "Failed to parse initialisation arguments[%s]", args);
113 		return -EINVAL;
114 	}
115 
116 	dev = rte_cryptodev_pmd_create(name, &vdev->device, &init_params);
117 	if (dev == NULL) {
118 		IPSEC_MB_LOG(ERR, "driver %s: create failed",
119 			     init_params.name);
120 		return -ENODEV;
121 	}
122 
123 	/* Set vector instructions mode supported */
124 	internals = dev->data->dev_private;
125 	internals->pmd_type = pmd_type;
126 	internals->max_nb_queue_pairs = init_params.max_nb_queue_pairs;
127 
128 	dev->driver_id = ipsec_mb_get_driver_id(pmd_type);
129 	if (dev->driver_id == UINT8_MAX) {
130 		IPSEC_MB_LOG(ERR, "driver %s: create failed",
131 			     init_params.name);
132 		return -ENODEV;
133 	}
134 	dev->dev_ops = ipsec_mb_pmds[pmd_type].ops;
135 	dev->enqueue_burst = ipsec_mb_enqueue_burst;
136 	dev->dequeue_burst = ipsec_mb_pmds[pmd_type].dequeue_burst;
137 	dev->feature_flags = pmd_data->feature_flags;
138 
139 	if (pmd_data->dev_config) {
140 		retval = (*pmd_data->dev_config)(dev);
141 		if (retval < 0) {
142 			IPSEC_MB_LOG(ERR,
143 				"Failed to configure device %s", name);
144 			rte_cryptodev_pmd_destroy(dev);
145 			return retval;
146 		}
147 	}
148 
149 	switch (vector_mode) {
150 	case IPSEC_MB_AVX512:
151 		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
152 		break;
153 	case IPSEC_MB_AVX2:
154 		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
155 		break;
156 	case IPSEC_MB_AVX:
157 		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
158 		break;
159 	case IPSEC_MB_SSE:
160 		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
161 		break;
162 	default:
163 		break;
164 	}
165 
166 	rte_cryptodev_pmd_probing_finish(dev);
167 
168 	IPSEC_MB_LOG(INFO, "IPSec Multi-buffer library version used: %s",
169 		     imb_get_version_str());
170 
171 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
172 		retval = ipsec_mb_mp_request_register();
173 		if (retval && ((rte_errno == EEXIST) || (rte_errno == ENOTSUP)))
174 			/* Safe to proceed, return 0 */
175 			return 0;
176 
177 		if (retval)
178 			IPSEC_MB_LOG(ERR,
179 				"IPSec Multi-buffer register MP request failed.");
180 	}
181 	return retval;
182 }
183 
184 int
185 ipsec_mb_remove(struct rte_vdev_device *vdev)
186 {
187 	struct rte_cryptodev *cryptodev;
188 	const char *name;
189 	int qp_id;
190 
191 	name = rte_vdev_device_name(vdev);
192 	if (name == NULL)
193 		return -EINVAL;
194 
195 	cryptodev = rte_cryptodev_pmd_get_named_dev(name);
196 	if (cryptodev == NULL)
197 		return -ENODEV;
198 
199 	if (RTE_PER_LCORE(mb_mgr)) {
200 		free_mb_mgr(RTE_PER_LCORE(mb_mgr));
201 		RTE_PER_LCORE(mb_mgr) = NULL;
202 	}
203 
204 	if (cryptodev->security_ctx) {
205 		rte_free(cryptodev->security_ctx);
206 		cryptodev->security_ctx = NULL;
207 	}
208 
209 	for (qp_id = 0; qp_id < cryptodev->data->nb_queue_pairs; qp_id++)
210 		ipsec_mb_qp_release(cryptodev, qp_id);
211 
212 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
213 		ipsec_mb_mp_request_unregister();
214 
215 	return rte_cryptodev_pmd_destroy(cryptodev);
216 }
217