xref: /dpdk/drivers/crypto/ipsec_mb/ipsec_mb_private.c (revision 4b53e9802b6b6040ad5622b1414aaa93d9581d0c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2021 Intel Corporation
3  */
4 
5 #include <bus_vdev_driver.h>
6 #include <rte_common.h>
7 #include <rte_cryptodev.h>
8 
9 #include "ipsec_mb_private.h"
10 
11 RTE_DEFINE_PER_LCORE(IMB_MGR *, mb_mgr);
12 
13 struct ipsec_mb_internals ipsec_mb_pmds[IPSEC_MB_N_PMD_TYPES];
14 int ipsec_mb_logtype_driver;
15 enum ipsec_mb_vector_mode vector_mode;
16 
17 /**
18  * Generic burst enqueue, place crypto operations on ingress queue for
19  * processing.
20  *
21  * @param __qp         Queue Pair to process
22  * @param ops          Crypto operations for processing
23  * @param nb_ops       Number of crypto operations for processing
24  *
25  * @return
26  * - Number of crypto operations enqueued
27  */
28 static uint16_t
29 ipsec_mb_enqueue_burst(void *__qp, struct rte_crypto_op **ops,
30 		uint16_t nb_ops)
31 {
32 	struct ipsec_mb_qp *qp = __qp;
33 
34 	unsigned int nb_enqueued;
35 
36 	nb_enqueued = rte_ring_enqueue_burst(qp->ingress_queue,
37 			(void **)ops, nb_ops, NULL);
38 
39 	qp->stats.enqueued_count += nb_enqueued;
40 	qp->stats.enqueue_err_count += nb_ops - nb_enqueued;
41 
42 	return nb_enqueued;
43 }
44 
45 static int
46 ipsec_mb_mp_request_register(void)
47 {
48 	RTE_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
49 	IPSEC_MB_LOG(INFO, "Starting register MP IPC request\n");
50 	return rte_mp_action_register(IPSEC_MB_MP_MSG,
51 				ipsec_mb_ipc_request);
52 }
53 
/*
 * Unregister the IPSEC_MB_MP_MSG multi-process IPC handler registered by
 * ipsec_mb_mp_request_register(). Primary process only (asserted below).
 */
static void
ipsec_mb_mp_request_unregister(void)
{
	RTE_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	rte_mp_action_unregister(IPSEC_MB_MP_MSG);
}
60 
61 int
62 ipsec_mb_create(struct rte_vdev_device *vdev,
63 	enum ipsec_mb_pmd_types pmd_type)
64 {
65 	struct rte_cryptodev *dev;
66 	struct ipsec_mb_dev_private *internals;
67 	struct ipsec_mb_internals *pmd_data = &ipsec_mb_pmds[pmd_type];
68 	struct rte_cryptodev_pmd_init_params init_params = {};
69 	const char *name, *args;
70 	int retval;
71 
72 #if defined(RTE_ARCH_ARM)
73 	if ((pmd_type != IPSEC_MB_PMD_TYPE_SNOW3G) &&
74 		(pmd_type != IPSEC_MB_PMD_TYPE_ZUC))
75 		return -ENOTSUP;
76 #endif
77 
78 #if defined(RTE_ARCH_ARM64)
79 	vector_mode = IPSEC_MB_ARM64;
80 #elif defined(RTE_ARCH_X86_64)
81 	if (vector_mode == IPSEC_MB_NOT_SUPPORTED) {
82 		/* Check CPU for supported vector instruction set */
83 		if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
84 			vector_mode = IPSEC_MB_AVX512;
85 		else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
86 			vector_mode = IPSEC_MB_AVX2;
87 		else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
88 			vector_mode = IPSEC_MB_AVX;
89 		else
90 			vector_mode = IPSEC_MB_SSE;
91 	}
92 #else
93 	/* Unsupported architecture */
94 	return -ENOTSUP;
95 #endif
96 
97 	init_params.private_data_size = sizeof(struct ipsec_mb_dev_private) +
98 		pmd_data->internals_priv_size;
99 	init_params.max_nb_queue_pairs =
100 		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS;
101 	init_params.socket_id = rte_socket_id();
102 
103 	name = rte_vdev_device_name(vdev);
104 	if (name == NULL)
105 		return -EINVAL;
106 
107 	args = rte_vdev_device_args(vdev);
108 
109 	retval = rte_cryptodev_pmd_parse_input_args(&init_params, args);
110 	if (retval) {
111 		IPSEC_MB_LOG(
112 		    ERR, "Failed to parse initialisation arguments[%s]", args);
113 		return -EINVAL;
114 	}
115 
116 	dev = rte_cryptodev_pmd_create(name, &vdev->device, &init_params);
117 	if (dev == NULL) {
118 		IPSEC_MB_LOG(ERR, "driver %s: create failed",
119 			     init_params.name);
120 		return -ENODEV;
121 	}
122 
123 	/* Set vector instructions mode supported */
124 	internals = dev->data->dev_private;
125 	internals->pmd_type = pmd_type;
126 	internals->max_nb_queue_pairs = init_params.max_nb_queue_pairs;
127 
128 	dev->driver_id = ipsec_mb_get_driver_id(pmd_type);
129 	if (dev->driver_id == UINT8_MAX) {
130 		IPSEC_MB_LOG(ERR, "driver %s: create failed",
131 			     init_params.name);
132 		return -ENODEV;
133 	}
134 	dev->dev_ops = ipsec_mb_pmds[pmd_type].ops;
135 	dev->enqueue_burst = ipsec_mb_enqueue_burst;
136 	dev->dequeue_burst = ipsec_mb_pmds[pmd_type].dequeue_burst;
137 	dev->feature_flags = pmd_data->feature_flags;
138 
139 	if (pmd_data->dev_config) {
140 		retval = (*pmd_data->dev_config)(dev);
141 		if (retval < 0) {
142 			IPSEC_MB_LOG(ERR,
143 				"Failed to configure device %s", name);
144 			rte_cryptodev_pmd_destroy(dev);
145 			return retval;
146 		}
147 	}
148 
149 	switch (vector_mode) {
150 	case IPSEC_MB_AVX512:
151 		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
152 		break;
153 	case IPSEC_MB_AVX2:
154 		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
155 		break;
156 	case IPSEC_MB_AVX:
157 		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
158 		break;
159 	case IPSEC_MB_SSE:
160 		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
161 		break;
162 	default:
163 		break;
164 	}
165 
166 	rte_cryptodev_pmd_probing_finish(dev);
167 
168 	IPSEC_MB_LOG(INFO, "IPSec Multi-buffer library version used: %s\n",
169 		     imb_get_version_str());
170 
171 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
172 		retval = ipsec_mb_mp_request_register();
173 
174 	return retval;
175 }
176 
177 int
178 ipsec_mb_remove(struct rte_vdev_device *vdev)
179 {
180 	struct rte_cryptodev *cryptodev;
181 	const char *name;
182 	int qp_id;
183 
184 	name = rte_vdev_device_name(vdev);
185 	if (name == NULL)
186 		return -EINVAL;
187 
188 	cryptodev = rte_cryptodev_pmd_get_named_dev(name);
189 	if (cryptodev == NULL)
190 		return -ENODEV;
191 
192 	if (RTE_PER_LCORE(mb_mgr)) {
193 		free_mb_mgr(RTE_PER_LCORE(mb_mgr));
194 		RTE_PER_LCORE(mb_mgr) = NULL;
195 	}
196 
197 	if (cryptodev->security_ctx) {
198 		rte_free(cryptodev->security_ctx);
199 		cryptodev->security_ctx = NULL;
200 	}
201 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED
202 	rte_free(cryptodev->security_ctx);
203 	cryptodev->security_ctx = NULL;
204 #endif
205 
206 	for (qp_id = 0; qp_id < cryptodev->data->nb_queue_pairs; qp_id++)
207 		ipsec_mb_qp_release(cryptodev, qp_id);
208 
209 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
210 		ipsec_mb_mp_request_unregister();
211 
212 	return rte_cryptodev_pmd_destroy(cryptodev);
213 }
214