xref: /dpdk/drivers/crypto/qat/qat_sym.c (revision 8731420d9ad6087e4b3ebe3c2bfb26ee5c3c8826)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2023 Intel Corporation
 */

#ifdef RTE_QAT_OPENSSL
#include <openssl/evp.h>
#endif

#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_crypto_sym.h>
#include <bus_pci_driver.h>
#include <rte_byteorder.h>
#include <rte_security_driver.h>

#include "qat_common.h"
#include "qat_sym.h"
#include "qat_crypto.h"
#include "qat_qp.h"

uint8_t qat_sym_driver_id;

#define SYM_ENQ_THRESHOLD_NAME "qat_sym_enq_threshold"
#define SYM_CIPHER_CRC_ENABLE_NAME "qat_sym_cipher_crc_enable"

static const char *const arguments[] = {
	SYM_ENQ_THRESHOLD_NAME,
	SYM_CIPHER_CRC_ENABLE_NAME,
	NULL
};

struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];

/* An rte_driver is needed in the registration of both the device and the driver
 * with cryptodev.
 * The actual qat pci's rte_driver can't be used as its name represents
 * the whole pci device with all services. Think of this as a holder for a name
 * for the crypto part of the pci device.
 */
static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
static const struct rte_driver cryptodev_qat_sym_driver = {
	.name = qat_sym_drv_name,
	.alias = qat_sym_drv_name
};

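/*
 * Pre-compute the IOVAs of the SGL tables, the single-pass GMAC content
 * descriptor and the NULL digest inside each op cookie, so the data path can
 * reference them without a per-request virtual-to-IOVA translation.
 */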
void
qat_sym_init_op_cookie(void *op_cookie)
{
	struct qat_sym_op_cookie *cookie = op_cookie;

	cookie->qat_sgl_src_phys_addr =
			rte_mempool_virt2iova(cookie) +
			offsetof(struct qat_sym_op_cookie,
			qat_sgl_src);

	cookie->qat_sgl_dst_phys_addr =
			rte_mempool_virt2iova(cookie) +
			offsetof(struct qat_sym_op_cookie,
			qat_sgl_dst);

	cookie->opt.spc_gmac.cd_phys_addr =
			rte_mempool_virt2iova(cookie) +
			offsetof(struct qat_sym_op_cookie,
			opt.spc_gmac.cd_cipher);

	cookie->digest_null_phys_addr =
			rte_mempool_virt2iova(cookie) +
			offsetof(struct qat_sym_op_cookie,
			digest_null);
}

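/*
 * Translate one crypto op into a firmware request message. The session
 * pointer and its per-process build_request handler are cached in
 * qp->opaque[], so consecutive ops on the same session skip the lookup and
 * validation below.
 */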
static __rte_always_inline int
qat_sym_build_request(void *in_op, uint8_t *out_msg,
		void *op_cookie, struct qat_qp *qp)
{
	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
	uintptr_t sess = (uintptr_t)qp->opaque[0];
	uintptr_t build_request_p = (uintptr_t)qp->opaque[1];
	qat_sym_build_request_t build_request = (void *)build_request_p;
	struct qat_sym_session *ctx = NULL;
	enum rte_proc_type_t proc_type = rte_eal_process_type();

	if (proc_type == RTE_PROC_AUTO || proc_type == RTE_PROC_INVALID)
		return -EINVAL;

	if (likely(op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)) {
		ctx = (void *)CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);
		if (sess != (uintptr_t)ctx) {
			struct rte_cryptodev *cdev;
			struct qat_cryptodev_private *internals;

			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
			internals = cdev->data->dev_private;

			if (internals->qat_dev->qat_dev_gen != qp->qat_dev_gen) {
				op->status =
					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
				return -EINVAL;
			}

			if (unlikely(ctx->build_request[proc_type] == NULL)) {
				int ret =
				qat_sym_gen_dev_ops[qp->qat_dev_gen].set_session(
					(void *)cdev, (void *)ctx);
				if (ret < 0) {
					op->status =
						RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
					return -EINVAL;
				}
			}

			build_request = ctx->build_request[proc_type];
			qp->opaque[0] = (uintptr_t)ctx;
			qp->opaque[1] = (uintptr_t)build_request;
		}
	} else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		ctx = SECURITY_GET_SESS_PRIV(op->sym->session);
		if (unlikely(!ctx)) {
			QAT_DP_LOG(ERR, "No session for this device");
			return -EINVAL;
		}
		if (sess != (uintptr_t)ctx) {
			struct rte_cryptodev *cdev;
			struct qat_cryptodev_private *internals;

#ifdef RTE_QAT_OPENSSL
			if (unlikely(ctx->bpi_ctx == NULL)) {
#else
			if (unlikely(ctx->mb_mgr == NULL)) {
#endif
				QAT_DP_LOG(ERR, "QAT PMD only supports security"
						" operation requests for"
						" DOCSIS, op (%p) is not for"
						" DOCSIS.", op);
				return -EINVAL;
			} else if (unlikely(((op->sym->m_dst != NULL) &&
					(op->sym->m_dst != op->sym->m_src)) ||
					op->sym->m_src->nb_segs > 1)) {
				QAT_DP_LOG(ERR, "OOP and/or multi-segment"
						" buffers not supported for"
						" DOCSIS security.");
				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
				return -EINVAL;
			}
			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
			internals = cdev->data->dev_private;

			if (internals->qat_dev->qat_dev_gen != qp->qat_dev_gen) {
				op->status =
					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
				return -EINVAL;
			}

			if (unlikely(ctx->build_request[proc_type] == NULL)) {
				int ret =
				qat_sym_gen_dev_ops[qp->qat_dev_gen].set_session(
					(void *)cdev, (void *)sess);
				if (ret < 0) {
					op->status =
						RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
					return -EINVAL;
				}
			}

			sess = (uintptr_t)op->sym->session;
			build_request = ctx->build_request[proc_type];
			qp->opaque[0] = sess;
			qp->opaque[1] = (uintptr_t)build_request;
		}
	} else { /* RTE_CRYPTO_OP_SESSIONLESS */
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		QAT_LOG(DEBUG, "QAT does not support sessionless operation");
		return -1;
	}

	return build_request(op, (void *)ctx, out_msg, op_cookie);
}

uint16_t
qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	return qat_enqueue_op_burst(qp, qat_sym_build_request,
			(void **)ops, nb_ops);
}

uint16_t
qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	return qat_dequeue_op_burst(qp, (void **)ops,
			qat_sym_process_response, nb_ops);
}

uint16_t
qat_sym_dequeue_burst_gen_lce(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	return qat_dequeue_op_burst(qp, (void **)ops, qat_sym_process_response_gen_lce, nb_ops);
}
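
/*
 * A minimal usage sketch (not part of this driver): once the cryptodev is
 * configured and started, an application reaches the handlers above through
 * the generic burst API. dev_id, qp_id, ops[] and nb_ops are illustrative
 * names, not symbols from this file.
 *
 *	uint16_t nb_enq, nb_deq;
 *
 *	// resolves to qat_sym_enqueue_burst() for a QAT sym cryptodev
 *	nb_enq = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
 *
 *	// later, poll for completions; resolves to qat_sym_dequeue_burst()
 *	nb_deq = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, nb_enq);
 */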

static int
qat_sym_dev_create(struct qat_pci_device *qat_pci_dev)
{
	int ret = 0;
	struct qat_device_info *qat_dev_instance =
			&qat_pci_devs[qat_pci_dev->qat_dev_id];
	struct rte_cryptodev_pmd_init_params init_params = {
		.name = "",
		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
		.private_data_size = sizeof(struct qat_cryptodev_private)
	};
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	struct rte_cryptodev *cryptodev;
	struct qat_cryptodev_private *internals;
	enum qat_device_gen qat_dev_gen = qat_pci_dev->qat_dev_gen;
	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
	uint16_t sub_id = qat_dev_instance->pci_dev->id.subsystem_device_id;
	char *cmdline = NULL;

	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
			qat_pci_dev->name, "sym");
	QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);

	if (qat_pci_dev->qat_dev_gen == QAT_VQAT &&
		sub_id != ADF_VQAT_SYM_PCI_SUBSYSTEM_ID) {
		QAT_LOG(ERR, "Device (vqat instance) %s does not support symmetric crypto",
				name);
		return -EFAULT;
	}
	if (gen_dev_ops->cryptodev_ops == NULL) {
		QAT_LOG(ERR, "Device %s does not support symmetric crypto",
				name);
		return -(EFAULT);
	}

	/*
	 * All processes must use the same driver id so they can share
	 * sessions. Store the driver id so we can validate that every process
	 * sees the same value; they typically do, but could differ if the
	 * binaries were built separately.
	 */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		qat_pci_dev->qat_sym_driver_id =
				qat_sym_driver_id;
	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		if (qat_pci_dev->qat_sym_driver_id !=
				qat_sym_driver_id) {
			QAT_LOG(ERR,
				"Device %s has a different driver id than the corresponding device in the primary process",
				name);
			return -(EFAULT);
		}
	}

	/* Populate subset device to use in cryptodev device creation */
	qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
	qat_dev_instance->sym_rte_dev.numa_node =
			qat_dev_instance->pci_dev->device.numa_node;
	qat_dev_instance->sym_rte_dev.devargs = NULL;

	cryptodev = rte_cryptodev_pmd_create(name,
			&(qat_dev_instance->sym_rte_dev), &init_params);

	if (cryptodev == NULL)
		return -ENODEV;

	qat_dev_instance->sym_rte_dev.name = cryptodev->data->name;
	cryptodev->driver_id = qat_sym_driver_id;
	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;

	cryptodev->enqueue_burst = qat_sym_enqueue_burst;
	if (qat_dev_gen == QAT_GEN_LCE)
		cryptodev->dequeue_burst = qat_sym_dequeue_burst_gen_lce;
	else
		cryptodev->dequeue_burst = qat_sym_dequeue_burst;

	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (gen_dev_ops->create_security_ctx) {
		cryptodev->security_ctx =
			gen_dev_ops->create_security_ctx((void *)cryptodev);
		if (cryptodev->security_ctx == NULL) {
			QAT_LOG(ERR, "rte_security_ctx memory alloc failed");
			ret = -ENOMEM;
			goto error;
		}

		cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
		QAT_LOG(INFO, "Device %s rte_security support enabled", name);
	} else {
		QAT_LOG(INFO, "Device %s rte_security support disabled", name);
	}
	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"QAT_SYM_CAPA_GEN_%d",
			qat_pci_dev->qat_dev_gen);

	internals = cryptodev->data->dev_private;
	internals->qat_dev = qat_pci_dev;
	internals->dev_id = cryptodev->data->dev_id;

	cmdline = qat_dev_cmdline_get_val(qat_pci_dev,
			SYM_ENQ_THRESHOLD_NAME);
	if (cmdline) {
		internals->min_enq_burst_threshold =
			atoi(cmdline) > MAX_QP_THRESHOLD_SIZE ?
			MAX_QP_THRESHOLD_SIZE :
			atoi(cmdline);
	}
	cmdline = qat_dev_cmdline_get_val(qat_pci_dev,
			SYM_CIPHER_CRC_ENABLE_NAME);
	if (cmdline)
		internals->cipher_crc_offload_enable = atoi(cmdline);

	if (gen_dev_ops->get_capabilities(internals,
			capa_memz_name, qat_pci_dev->options.slice_map) < 0) {
		QAT_LOG(ERR,
			"Device cannot obtain capabilities, destroying PMD for %s",
			name);
		ret = -1;
		goto error;
	}
	internals->service_type = QAT_SERVICE_SYMMETRIC;
	qat_pci_dev->pmd[QAT_SERVICE_SYMMETRIC] = internals;
	QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
			cryptodev->data->name, internals->dev_id);

	return 0;

error:
	rte_free(cryptodev->security_ctx);
	cryptodev->security_ctx = NULL;
	rte_cryptodev_pmd_destroy(cryptodev);
	memset(&qat_dev_instance->sym_rte_dev, 0,
		sizeof(qat_dev_instance->sym_rte_dev));

	return ret;
}
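
/*
 * A hedged sketch of how the two devargs parsed above are typically supplied,
 * following the format described in the QAT documentation; the PCI address
 * placeholder and the values are illustrative, not taken from this file:
 *
 *	dpdk-app -a <bdf>,qat_sym_enq_threshold=32,qat_sym_cipher_crc_enable=1 ...
 *
 * qat_dev_cmdline_get_val() yields the value string for the given key, or
 * NULL when the key was not provided, as the NULL checks above rely on.
 */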

static int
qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
{
	struct rte_cryptodev *cryptodev;
	struct qat_cryptodev_private *dev;

	if (qat_pci_dev == NULL)
		return -ENODEV;
	dev = qat_pci_dev->pmd[QAT_SERVICE_SYMMETRIC];
	if (dev == NULL)
		return 0;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_memzone_free(dev->capa_mz);

	/* free crypto device */
	cryptodev = rte_cryptodev_pmd_get_dev(dev->dev_id);
	rte_free(cryptodev->security_ctx);
	cryptodev->security_ctx = NULL;
	rte_cryptodev_pmd_destroy(cryptodev);
	qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL;
	qat_pci_dev->pmd[QAT_SERVICE_SYMMETRIC] = NULL;

	return 0;
}

int
qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
	enum rte_crypto_op_sess_type sess_type,
	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
{
	struct qat_cryptodev_private *internals = dev->data->dev_private;
	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
	struct qat_crypto_gen_dev_ops *gen_dev_ops =
			&qat_sym_gen_dev_ops[qat_dev_gen];
	struct qat_qp *qp;
	struct qat_sym_session *ctx;
	struct qat_sym_dp_ctx *dp_ctx;

	if (!gen_dev_ops->set_raw_dp_ctx) {
		QAT_LOG(ERR, "Device GEN %u does not support raw data path",
				qat_dev_gen);
		return -ENOTSUP;
	}

	qp = dev->data->queue_pairs[qp_id];
	dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;

	if (!is_update) {
		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
				sizeof(struct qat_sym_dp_ctx));
		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
		dp_ctx->tail = qp->tx_q.tail;
		dp_ctx->head = qp->rx_q.head;
		dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
	}

	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
		return -EINVAL;

	ctx = CRYPTODEV_GET_SYM_SESS_PRIV(session_ctx.crypto_sess);

	dp_ctx->session = ctx;

	return gen_dev_ops->set_raw_dp_ctx(raw_dp_ctx, ctx);
}
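
/*
 * A minimal sketch of how an application reaches this entry point through the
 * public raw data-path API. dev_id, qp_id and sess are assumed names, not
 * symbols defined in this file:
 *
 *	int sz = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *	struct rte_crypto_raw_dp_ctx *ctx = rte_zmalloc(NULL, sz, 0);
 *	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
 *
 *	// dispatches to qat_sym_configure_dp_ctx() for a QAT sym cryptodev
 *	rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
 *			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);
 */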

int
qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct qat_sym_dp_ctx);
}

static struct cryptodev_driver qat_crypto_drv;
RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
		cryptodev_qat_sym_driver,
		qat_sym_driver_id);

RTE_INIT(qat_sym_init)
{
	qat_cmdline_defines[QAT_SERVICE_SYMMETRIC] = arguments;
	qat_service[QAT_SERVICE_SYMMETRIC].name = "symmetric crypto";
	qat_service[QAT_SERVICE_SYMMETRIC].dev_create = qat_sym_dev_create;
	qat_service[QAT_SERVICE_SYMMETRIC].dev_destroy = qat_sym_dev_destroy;
}