/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2021 Intel Corporation
 */

#include "pmd_zuc_priv.h"

/** Parse crypto xform chain and set private session parameters. */
static int
zuc_session_configure(__rte_unused IMB_MGR * mgr, void *zuc_sess,
		const struct rte_crypto_sym_xform *xform)
{
	struct zuc_session *sess = (struct zuc_session *) zuc_sess;
	const struct rte_crypto_sym_xform *auth_xform = NULL;
	const struct rte_crypto_sym_xform *cipher_xform = NULL;
	enum ipsec_mb_operation mode;
	/* Select Crypto operation - hash then cipher / cipher then hash */
	int ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
				&cipher_xform, NULL);

	if (ret)
		return ret;

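	/*
	 * ZUC-EEA3/EIA3 use a 128-bit key and a 128-bit IV, which is why the
	 * single ZUC_IV_KEY_LENGTH constant is used for both the IV length
	 * checks and the key copies below.
	 */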
	if (cipher_xform) {
		/* Only ZUC EEA3 supported */
		if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_ZUC_EEA3)
			return -ENOTSUP;

		if (cipher_xform->cipher.iv.length != ZUC_IV_KEY_LENGTH) {
			IPSEC_MB_LOG(ERR, "Wrong IV length");
			return -EINVAL;
		}
		sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;

		/* Copy the key */
		memcpy(sess->pKey_cipher, cipher_xform->cipher.key.data,
				ZUC_IV_KEY_LENGTH);
	}

	if (auth_xform) {
		/* Only ZUC EIA3 supported */
		if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_ZUC_EIA3)
			return -ENOTSUP;

		if (auth_xform->auth.digest_length != ZUC_DIGEST_LENGTH) {
			IPSEC_MB_LOG(ERR, "Wrong digest length");
			return -EINVAL;
		}

		sess->auth_op = auth_xform->auth.op;

		if (auth_xform->auth.iv.length != ZUC_IV_KEY_LENGTH) {
			IPSEC_MB_LOG(ERR, "Wrong IV length");
			return -EINVAL;
		}
		sess->auth_iv_offset = auth_xform->auth.iv.offset;

		/* Copy the key */
		memcpy(sess->pKey_hash, auth_xform->auth.key.data,
				ZUC_IV_KEY_LENGTH);
	}

	sess->op = mode;
	return 0;
}

/** Encrypt/decrypt mbufs. */
static uint8_t
process_zuc_cipher_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
		struct zuc_session **sessions,
		uint8_t num_ops)
{
	unsigned int i;
	uint8_t processed_ops = 0;
	const void *src[ZUC_MAX_BURST];
	void *dst[ZUC_MAX_BURST];
	const void *iv[ZUC_MAX_BURST];
	uint32_t num_bytes[ZUC_MAX_BURST];
	const void *cipher_keys[ZUC_MAX_BURST];
	struct zuc_session *sess;

	for (i = 0; i < num_ops; i++) {
		if (((ops[i]->sym->cipher.data.length % BYTE_LEN) != 0)
				|| ((ops[i]->sym->cipher.data.offset
					% BYTE_LEN) != 0)) {
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			IPSEC_MB_LOG(ERR,
				"Cipher data length or offset is not byte-aligned");
			break;
		}

		sess = sessions[i];

#ifdef RTE_LIBRTE_PMD_ZUC_DEBUG
		if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
				(ops[i]->sym->m_dst != NULL &&
				!rte_pktmbuf_is_contiguous(
						ops[i]->sym->m_dst))) {
			IPSEC_MB_LOG(ERR,
				"PMD supports only contiguous mbufs, "
				"op (%p) provides noncontiguous mbuf "
				"as source/destination buffer.\n",
				ops[i]);
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			break;
		}
#endif

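		/*
		 * Cipher data offsets and lengths are expressed in bits;
		 * the >> 3 shifts below convert them to byte addresses and
		 * byte counts for the multi-buffer API.
		 */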
		src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
				(ops[i]->sym->cipher.data.offset >> 3);
		dst[i] = ops[i]->sym->m_dst ?
			rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *) +
				(ops[i]->sym->cipher.data.offset >> 3) :
			rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
				(ops[i]->sym->cipher.data.offset >> 3);
		iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
				sess->cipher_iv_offset);
		num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;

		cipher_keys[i] = sess->pKey_cipher;

		processed_ops++;
	}

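	/* Run ZUC-EEA3 over all gathered buffers in a single multi-buffer call. */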
	IMB_ZUC_EEA3_N_BUFFER(qp->mb_mgr, (const void **)cipher_keys,
			(const void **)iv, (const void **)src, (void **)dst,
			num_bytes, processed_ops);

	return processed_ops;
}

/** Generate/verify hash from mbufs. */
static int
process_zuc_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
		struct zuc_session **sessions,
		uint8_t num_ops)
{
	unsigned int i;
	uint8_t processed_ops = 0;
	uint8_t *src[ZUC_MAX_BURST] = { 0 };
	uint32_t *dst[ZUC_MAX_BURST];
	uint32_t length_in_bits[ZUC_MAX_BURST] = { 0 };
	uint8_t *iv[ZUC_MAX_BURST] = { 0 };
	const void *hash_keys[ZUC_MAX_BURST] = { 0 };
	struct zuc_session *sess;
	struct zuc_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);

	for (i = 0; i < num_ops; i++) {
		/* Data must be byte aligned */
		if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			IPSEC_MB_LOG(ERR,
				"Auth data offset is not byte-aligned");
			break;
		}

		sess = sessions[i];

		length_in_bits[i] = ops[i]->sym->auth.data.length;

		src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
				(ops[i]->sym->auth.data.offset >> 3);
		iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
				sess->auth_iv_offset);

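		/*
		 * For verification, write the computed tag into a scratch
		 * buffer in the queue pair's private data so it can be
		 * compared against the tag attached to the op further down.
		 */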
		hash_keys[i] = sess->pKey_hash;
		if (sess->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY)
			dst[i] = (uint32_t *)qp_data->temp_digest;
		else
			dst[i] = (uint32_t *)ops[i]->sym->auth.digest.data;

		processed_ops++;
	}

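	/* Compute ZUC-EIA3 tags for all gathered buffers in one multi-buffer call. */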
	IMB_ZUC_EIA3_N_BUFFER(qp->mb_mgr, (const void **)hash_keys,
			(const void * const *)iv, (const void * const *)src,
			length_in_bits, dst, processed_ops);

	/*
	 * If tag needs to be verified, compare generated tag
	 * with attached tag
	 */
	for (i = 0; i < processed_ops; i++)
		if (sessions[i]->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY)
			if (memcmp(dst[i], ops[i]->sym->auth.digest.data,
					ZUC_DIGEST_LENGTH) != 0)
				ops[i]->status =
					RTE_CRYPTO_OP_STATUS_AUTH_FAILED;

	return processed_ops;
}

/** Process a batch of crypto ops which share the same operation type. */
static int
process_ops(struct rte_crypto_op **ops, enum ipsec_mb_operation op_type,
		struct zuc_session **sessions,
		struct ipsec_mb_qp *qp, uint8_t num_ops)
{
	unsigned int i;
	unsigned int processed_ops;

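	/*
	 * Dispatch on the batched operation type; chained modes run the
	 * cipher and hash passes back to back over the same ops.
	 */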
	switch (op_type) {
	case IPSEC_MB_OP_ENCRYPT_ONLY:
	case IPSEC_MB_OP_DECRYPT_ONLY:
		processed_ops = process_zuc_cipher_op(qp, ops,
				sessions, num_ops);
		break;
	case IPSEC_MB_OP_HASH_GEN_ONLY:
	case IPSEC_MB_OP_HASH_VERIFY_ONLY:
		processed_ops = process_zuc_hash_op(qp, ops, sessions,
				num_ops);
		break;
	case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
		processed_ops = process_zuc_cipher_op(qp, ops, sessions,
				num_ops);
		process_zuc_hash_op(qp, ops, sessions, processed_ops);
		break;
	case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
		processed_ops = process_zuc_hash_op(qp, ops, sessions,
				num_ops);
		process_zuc_cipher_op(qp, ops, sessions, processed_ops);
		break;
	default:
		/* Operation not supported. */
		processed_ops = 0;
	}

	for (i = 0; i < num_ops; i++) {
		/*
		 * If there was no error/authentication failure,
		 * change status to successful.
		 */
		if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
			ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		/* Free session if a session-less crypto op. */
		if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
			memset(sessions[i], 0, sizeof(struct zuc_session));
			memset(ops[i]->sym->session, 0,
			rte_cryptodev_sym_get_existing_header_session_size(
					ops[i]->sym->session));
			rte_mempool_put(qp->sess_mp_priv, sessions[i]);
			rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
			ops[i]->sym->session = NULL;
		}
	}
	return processed_ops;
}

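/** Dequeue ops from the queue pair's ingress ring and process them in batches. */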
static uint16_t
zuc_pmd_dequeue_burst(void *queue_pair,
		struct rte_crypto_op **c_ops, uint16_t nb_ops)
{
	struct rte_crypto_op *curr_c_op;
	struct zuc_session *curr_sess;
	struct zuc_session *sessions[ZUC_MAX_BURST];
	enum ipsec_mb_operation prev_zuc_op = IPSEC_MB_OP_NOT_SUPPORTED;
	enum ipsec_mb_operation curr_zuc_op;
	struct ipsec_mb_qp *qp = queue_pair;
	unsigned int nb_dequeued;
	unsigned int i;
	uint8_t burst_size = 0;
	uint8_t processed_ops;

	nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
			(void **)c_ops, nb_ops, NULL);

	for (i = 0; i < nb_dequeued; i++) {
		curr_c_op = c_ops[i];

		curr_sess = (struct zuc_session *)
			ipsec_mb_get_session_private(qp, curr_c_op);
		if (unlikely(curr_sess == NULL)) {
			curr_c_op->status =
					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
			break;
		}

		curr_zuc_op = curr_sess->op;

		/*
		 * Batch ops that share the same operation type
		 * (cipher only, auth only...).
		 */
		if (burst_size == 0) {
			prev_zuc_op = curr_zuc_op;
			c_ops[0] = curr_c_op;
			sessions[0] = curr_sess;
			burst_size++;
		} else if (curr_zuc_op == prev_zuc_op) {
			c_ops[burst_size] = curr_c_op;
			sessions[burst_size] = curr_sess;
			burst_size++;
			/*
			 * When there are enough ops to process in a batch,
			 * process them, and start a new batch.
			 */
			if (burst_size == ZUC_MAX_BURST) {
				processed_ops = process_ops(c_ops, curr_zuc_op,
						sessions, qp, burst_size);
				if (processed_ops < burst_size) {
					burst_size = 0;
					break;
				}

				burst_size = 0;
			}
		} else {
			/*
			 * Different operation type, process the ops
			 * of the previous type.
			 */
			processed_ops = process_ops(c_ops, prev_zuc_op,
					sessions, qp, burst_size);
			if (processed_ops < burst_size) {
				burst_size = 0;
				break;
			}

			burst_size = 0;
			prev_zuc_op = curr_zuc_op;

			c_ops[0] = curr_c_op;
			sessions[0] = curr_sess;
			burst_size++;
		}
	}

	if (burst_size != 0) {
		/* Process the crypto ops of the last operation type. */
		processed_ops = process_ops(c_ops, prev_zuc_op,
				sessions, qp, burst_size);
	}

	qp->stats.dequeued_count += i;
	return i;
}

struct rte_cryptodev_ops zuc_pmd_ops = {
	.dev_configure = ipsec_mb_config,
	.dev_start = ipsec_mb_start,
	.dev_stop = ipsec_mb_stop,
	.dev_close = ipsec_mb_close,

	.stats_get = ipsec_mb_stats_get,
	.stats_reset = ipsec_mb_stats_reset,

	.dev_infos_get = ipsec_mb_info_get,

	.queue_pair_setup = ipsec_mb_qp_setup,
	.queue_pair_release = ipsec_mb_qp_release,

	.sym_session_get_size = ipsec_mb_sym_session_get_size,
	.sym_session_configure = ipsec_mb_sym_session_configure,
	.sym_session_clear = ipsec_mb_sym_session_clear
};

struct rte_cryptodev_ops *rte_zuc_pmd_ops = &zuc_pmd_ops;

static int
zuc_probe(struct rte_vdev_device *vdev)
{
	return ipsec_mb_create(vdev, IPSEC_MB_PMD_TYPE_ZUC);
}

static struct rte_vdev_driver cryptodev_zuc_pmd_drv = {
	.probe = zuc_probe,
	.remove = ipsec_mb_remove
};

static struct cryptodev_driver zuc_crypto_drv;

RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_ZUC_PMD, cryptodev_zuc_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_ZUC_PMD, cryptodev_zuc_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_ZUC_PMD,
	"max_nb_queue_pairs=<int> socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(zuc_crypto_drv, cryptodev_zuc_pmd_drv.driver,
		pmd_driver_id_zuc);

/* Constructor function to register zuc PMD */
RTE_INIT(ipsec_mb_register_zuc)
{
	struct ipsec_mb_internals *zuc_data
	    = &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_ZUC];

	zuc_data->caps = zuc_capabilities;
	zuc_data->dequeue_burst = zuc_pmd_dequeue_burst;
	zuc_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
			| RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING
			| RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA
			| RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT
			| RTE_CRYPTODEV_FF_SYM_SESSIONLESS
			| RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
	zuc_data->internals_priv_size = 0;
	zuc_data->ops = &zuc_pmd_ops;
	zuc_data->qp_priv_size = sizeof(struct zuc_qp_data);
	zuc_data->session_configure = zuc_session_configure;
	zuc_data->session_priv_size = sizeof(struct zuc_session);
}