/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <rte_cryptodev.h>
#include <rte_ether.h>
#include <rte_ip.h>

#include "cperf_ops.h"
#include "cperf_test_vectors.h"

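/*
 * Populate a burst of asymmetric modexp ops: point each op's modexp
 * base/result at the shared perf_base/perf_mod_result buffers from
 * cperf_test_vectors and attach the asymmetric session, which is
 * passed in through the generic sym session pointer.
 */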
static int
cperf_set_ops_asym(struct rte_crypto_op **ops,
		   uint32_t src_buf_offset __rte_unused,
		   uint32_t dst_buf_offset __rte_unused, uint16_t nb_ops,
		   struct rte_cryptodev_sym_session *sess,
		   const struct cperf_options *options __rte_unused,
		   const struct cperf_test_vector *test_vector __rte_unused,
		   uint16_t iv_offset __rte_unused,
		   uint32_t *imix_idx __rte_unused,
		   uint64_t *tsc_start __rte_unused)
{
	uint16_t i;
	struct rte_cryptodev_asym_session *asym_sess = (void *)sess;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_asym_op *asym_op = ops[i]->asym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		asym_op->modex.base.data = perf_base;
		asym_op->modex.base.length = sizeof(perf_base);
		asym_op->modex.result.data = perf_mod_result;
		asym_op->modex.result.length = sizeof(perf_mod_result);
		rte_crypto_op_attach_asym_session(ops[i], asym_sess);
	}
	return 0;
}

#ifdef RTE_LIB_SECURITY
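/*
 * For outbound (encrypt) IPsec tests, seed the mbuf with the plaintext
 * test vector and fix up the IPv4 total length to match the buffer.
 */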
static void
test_ipsec_vec_populate(struct rte_mbuf *m, const struct cperf_options *options,
			const struct cperf_test_vector *test_vector)
{
	struct rte_ipv4_hdr *ip = rte_pktmbuf_mtod(m, struct rte_ipv4_hdr *);

	if ((options->aead_op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ||
		(options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)) {
		memcpy(ip, test_vector->plaintext.data, m->data_len);

		ip->total_length = rte_cpu_to_be_16(m->data_len);
	}
}

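/*
 * Populate a burst of security ops (PDCP/IPsec/DOCSIS): attach the
 * security session, set up m_src/m_dst and the protocol-specific
 * offsets and lengths. For IPsec throughput tests, the cycles spent
 * seeding the test vector are accumulated into *tsc_start so the
 * caller can discount this setup overhead.
 */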
static int
cperf_set_ops_security(struct rte_crypto_op **ops,
		uint32_t src_buf_offset,
		uint32_t dst_buf_offset,
		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset, uint32_t *imix_idx,
		uint64_t *tsc_start)
{
	uint64_t tsc_start_temp, tsc_end_temp;
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
		struct rte_security_session *sec_sess =
			(struct rte_security_session *)sess;
		uint32_t buf_sz;

		uint32_t *per_pkt_hfn = rte_crypto_op_ctod_offset(ops[i],
					uint32_t *, iv_offset);
		*per_pkt_hfn = options->pdcp_ses_hfn_en ? 0 : PDCP_DEFAULT_HFN;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_security_attach_session(ops[i], sec_sess);
		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		if (options->op_type == CPERF_PDCP ||
				options->op_type == CPERF_IPSEC) {
			/* In the case of IPsec, the headroom is consumed by
			 * the PMD, hence reset it.
			 */
			sym_op->m_src->data_off = options->headroom_sz;

			sym_op->m_src->buf_len = options->segment_sz;
			sym_op->m_src->data_len = options->test_buffer_size;
			sym_op->m_src->pkt_len = sym_op->m_src->data_len;

			if ((options->op_type == CPERF_IPSEC) &&
			    (options->test_file == NULL) &&
			    (options->test == CPERF_TEST_TYPE_THROUGHPUT)) {
				tsc_start_temp = rte_rdtsc_precise();
				test_ipsec_vec_populate(sym_op->m_src, options,
							test_vector);
				tsc_end_temp = rte_rdtsc_precise();

				*tsc_start += (tsc_end_temp - tsc_start_temp);
			}
		}

		if (options->op_type == CPERF_DOCSIS) {
			if (options->imix_distribution_count) {
				buf_sz = options->imix_buffer_sizes[*imix_idx];
				*imix_idx = (*imix_idx + 1) % options->pool_sz;
			} else
				buf_sz = options->test_buffer_size;

			sym_op->m_src->buf_len = options->segment_sz;
			sym_op->m_src->data_len = buf_sz;
			sym_op->m_src->pkt_len = buf_sz;

			/* DOCSIS header is not CRC'ed */
			sym_op->auth.data.offset = options->docsis_hdr_sz;
			sym_op->auth.data.length = buf_sz -
				sym_op->auth.data.offset - RTE_ETHER_CRC_LEN;
			/*
			 * DOCSIS header and SRC and DST MAC addresses are not
			 * ciphered
			 */
			sym_op->cipher.data.offset = sym_op->auth.data.offset +
				RTE_ETHER_HDR_LEN - RTE_ETHER_TYPE_LEN;
			sym_op->cipher.data.length = buf_sz -
				sym_op->cipher.data.offset;
		}
		/* Set dest mbuf to NULL for in-place operation
		 * (dst_buf_offset == 0)
		 */
		if (dst_buf_offset == 0)
			sym_op->m_dst = NULL;
		else
			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
							dst_buf_offset);
	}

	return 0;
}
#endif

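/*
 * Populate ops for the NULL cipher case: attach the session, set up
 * m_src/m_dst and take the cipher data length from the IMIX
 * distribution, or else the fixed test buffer size.
 */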
static int
cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
		uint32_t src_buf_offset, uint32_t dst_buf_offset,
		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector __rte_unused,
		uint16_t iv_offset __rte_unused, uint32_t *imix_idx,
		uint64_t *tsc_start __rte_unused)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_crypto_op_attach_sym_session(ops[i], sess);

		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		/* Set dest mbuf to NULL for in-place operation
		 * (dst_buf_offset == 0)
		 */
		if (dst_buf_offset == 0)
			sym_op->m_dst = NULL;
		else
			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
							dst_buf_offset);

		/* cipher parameters */
		if (options->imix_distribution_count) {
			sym_op->cipher.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->cipher.data.length = options->test_buffer_size;
		sym_op->cipher.data.offset = 0;
	}

	return 0;
}

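/*
 * Populate ops for the NULL auth case: same setup as the NULL cipher
 * variant, but filling the auth data length/offset instead.
 */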
static int
cperf_set_ops_null_auth(struct rte_crypto_op **ops,
		uint32_t src_buf_offset, uint32_t dst_buf_offset,
		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector __rte_unused,
		uint16_t iv_offset __rte_unused, uint32_t *imix_idx,
		uint64_t *tsc_start __rte_unused)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_crypto_op_attach_sym_session(ops[i], sess);

		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		/* Set dest mbuf to NULL for in-place operation
		 * (dst_buf_offset == 0)
		 */
		if (dst_buf_offset == 0)
			sym_op->m_dst = NULL;
		else
			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
							dst_buf_offset);

		/* auth parameters */
		if (options->imix_distribution_count) {
			sym_op->auth.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->auth.data.length = options->test_buffer_size;
		sym_op->auth.data.offset = 0;
	}

	return 0;
}

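/*
 * Populate cipher-only ops. Data lengths are converted to bits
 * (<<= 3) for SNOW3G/KASUMI/ZUC; for verify tests the cipher IV from
 * the test vector is copied into each op.
 */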
static int
cperf_set_ops_cipher(struct rte_crypto_op **ops,
		uint32_t src_buf_offset, uint32_t dst_buf_offset,
		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset, uint32_t *imix_idx,
		uint64_t *tsc_start __rte_unused)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_crypto_op_attach_sym_session(ops[i], sess);

		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		/* Set dest mbuf to NULL for in-place operation
		 * (dst_buf_offset == 0)
		 */
		if (dst_buf_offset == 0)
			sym_op->m_dst = NULL;
		else
			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
							dst_buf_offset);

		/* cipher parameters */
		if (options->imix_distribution_count) {
			sym_op->cipher.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->cipher.data.length = options->test_buffer_size;

		if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
				options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
				options->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3)
			sym_op->cipher.data.length <<= 3;

		sym_op->cipher.data.offset = 0;
	}

	if (options->test == CPERF_TEST_TYPE_VERIFY) {
		for (i = 0; i < nb_ops; i++) {
			uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
					uint8_t *, iv_offset);

			memcpy(iv_ptr, test_vector->cipher_iv.data,
					test_vector->cipher_iv.length);
		}
	}

	return 0;
}

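/*
 * Populate auth-only ops. For digest generation, the digest is placed
 * right after the data, spilling into the next segment when the
 * current one lacks room; for verification, the reference digest from
 * the test vector is used instead.
 */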
static int
cperf_set_ops_auth(struct rte_crypto_op **ops,
		uint32_t src_buf_offset, uint32_t dst_buf_offset,
		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset, uint32_t *imix_idx,
		uint64_t *tsc_start __rte_unused)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_crypto_op_attach_sym_session(ops[i], sess);

		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		/* Set dest mbuf to NULL for in-place operation
		 * (dst_buf_offset == 0)
		 */
		if (dst_buf_offset == 0)
			sym_op->m_dst = NULL;
		else
			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
							dst_buf_offset);

		if (test_vector->auth_iv.length) {
			uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
								uint8_t *,
								iv_offset);
			memcpy(iv_ptr, test_vector->auth_iv.data,
					test_vector->auth_iv.length);
		}

		/* authentication parameters */
		if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
			sym_op->auth.digest.data = test_vector->digest.data;
			sym_op->auth.digest.phys_addr =
					test_vector->digest.phys_addr;
		} else {
			uint32_t offset = options->test_buffer_size;
			struct rte_mbuf *buf, *tbuf;

			if (options->out_of_place) {
				buf = sym_op->m_dst;
			} else {
				tbuf = sym_op->m_src;
				while ((tbuf->next != NULL) &&
						(offset >= tbuf->data_len)) {
					offset -= tbuf->data_len;
					tbuf = tbuf->next;
				}
				/*
				 * If there is not enough room in the segment,
				 * place the digest in the next segment.
				 */
				if ((tbuf->data_len - offset) < options->digest_sz) {
					tbuf = tbuf->next;
					offset = 0;
				}
				buf = tbuf;
			}

			sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
					uint8_t *, offset);
			sym_op->auth.digest.phys_addr =
					rte_pktmbuf_iova_offset(buf, offset);
		}

		if (options->imix_distribution_count) {
			sym_op->auth.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->auth.data.length = options->test_buffer_size;

		if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
				options->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 ||
				options->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3)
			sym_op->auth.data.length <<= 3;

		sym_op->auth.data.offset = 0;
	}

	if (options->test == CPERF_TEST_TYPE_VERIFY) {
		if (test_vector->auth_iv.length) {
			for (i = 0; i < nb_ops; i++) {
				uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
						uint8_t *, iv_offset);

				memcpy(iv_ptr, test_vector->auth_iv.data,
						test_vector->auth_iv.length);
			}
		}
	}
	return 0;
}

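/*
 * Populate chained cipher+auth ops: a combination of the cipher-only
 * and auth-only cases above, with both sets of offsets and lengths
 * filled in. For verify tests the cipher IV (and auth IV, if any) are
 * copied into each op.
 */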
static int
cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
		uint32_t src_buf_offset, uint32_t dst_buf_offset,
		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset, uint32_t *imix_idx,
		uint64_t *tsc_start __rte_unused)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_crypto_op_attach_sym_session(ops[i], sess);

		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		/* Set dest mbuf to NULL for in-place operation
		 * (dst_buf_offset == 0)
		 */
		if (dst_buf_offset == 0)
			sym_op->m_dst = NULL;
		else
			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
							dst_buf_offset);

		/* cipher parameters */
		if (options->imix_distribution_count) {
			sym_op->cipher.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->cipher.data.length = options->test_buffer_size;

		if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
				options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
				options->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3)
			sym_op->cipher.data.length <<= 3;

		sym_op->cipher.data.offset = 0;

		/* authentication parameters */
		if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
			sym_op->auth.digest.data = test_vector->digest.data;
			sym_op->auth.digest.phys_addr =
					test_vector->digest.phys_addr;
		} else {
			uint32_t offset = options->test_buffer_size;
			struct rte_mbuf *buf, *tbuf;

			if (options->out_of_place) {
				buf = sym_op->m_dst;
			} else {
				tbuf = sym_op->m_src;
				while ((tbuf->next != NULL) &&
						(offset >= tbuf->data_len)) {
					offset -= tbuf->data_len;
					tbuf = tbuf->next;
				}
				/*
				 * If there is not enough room in the segment,
				 * place the digest in the next segment.
				 */
				if ((tbuf->data_len - offset) < options->digest_sz) {
					tbuf = tbuf->next;
					offset = 0;
				}
				buf = tbuf;
			}

			sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
					uint8_t *, offset);
			sym_op->auth.digest.phys_addr =
					rte_pktmbuf_iova_offset(buf, offset);
		}

		if (options->imix_distribution_count) {
			sym_op->auth.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->auth.data.length = options->test_buffer_size;

		if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
				options->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 ||
				options->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3)
			sym_op->auth.data.length <<= 3;

		sym_op->auth.data.offset = 0;
	}

	if (options->test == CPERF_TEST_TYPE_VERIFY) {
		for (i = 0; i < nb_ops; i++) {
			uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
					uint8_t *, iv_offset);

			memcpy(iv_ptr, test_vector->cipher_iv.data,
					test_vector->cipher_iv.length);
			if (test_vector->auth_iv.length) {
				/*
				 * Copy the auth IV right after the cipher IV
				 * (both sit after the crypto operation).
				 */
				iv_ptr += test_vector->cipher_iv.length;
				memcpy(iv_ptr, test_vector->auth_iv.data,
						test_vector->auth_iv.length);
			}
		}
	}

	return 0;
}

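/*
 * Populate AEAD ops. The AAD sits right after the IV (whose length is
 * rounded up to 16 bytes) in the op's private data. AES-CCM is laid
 * out specially: the nonce starts one byte into the IV field and the
 * AAD 18 bytes into the AAD field.
 */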
static int
cperf_set_ops_aead(struct rte_crypto_op **ops,
		uint32_t src_buf_offset, uint32_t dst_buf_offset,
		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset, uint32_t *imix_idx,
		uint64_t *tsc_start __rte_unused)
{
	uint16_t i;
	/* AAD is placed after the IV */
	uint16_t aad_offset = iv_offset +
			RTE_ALIGN_CEIL(test_vector->aead_iv.length, 16);

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_crypto_op_attach_sym_session(ops[i], sess);

		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		/* Set dest mbuf to NULL for in-place operation
		 * (dst_buf_offset == 0)
		 */
		if (dst_buf_offset == 0)
			sym_op->m_dst = NULL;
		else
			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
							dst_buf_offset);

		/* AEAD parameters */
		if (options->imix_distribution_count) {
			sym_op->aead.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->aead.data.length = options->test_buffer_size;
		sym_op->aead.data.offset = 0;

		sym_op->aead.aad.data = rte_crypto_op_ctod_offset(ops[i],
					uint8_t *, aad_offset);
		sym_op->aead.aad.phys_addr = rte_crypto_op_ctophys_offset(ops[i],
					aad_offset);

		if (options->aead_op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
			sym_op->aead.digest.data = test_vector->digest.data;
			sym_op->aead.digest.phys_addr =
					test_vector->digest.phys_addr;
		} else {
			uint32_t offset = sym_op->aead.data.length +
						sym_op->aead.data.offset;
			struct rte_mbuf *buf, *tbuf;

			if (options->out_of_place) {
				buf = sym_op->m_dst;
			} else {
				tbuf = sym_op->m_src;
				while ((tbuf->next != NULL) &&
						(offset >= tbuf->data_len)) {
					offset -= tbuf->data_len;
					tbuf = tbuf->next;
				}
				/*
				 * If there is not enough room in the segment,
				 * place the digest in the next segment.
				 */
				if ((tbuf->data_len - offset) < options->digest_sz) {
					tbuf = tbuf->next;
					offset = 0;
				}
				buf = tbuf;
			}

			sym_op->aead.digest.data = rte_pktmbuf_mtod_offset(buf,
					uint8_t *, offset);
			sym_op->aead.digest.phys_addr =
					rte_pktmbuf_iova_offset(buf, offset);
		}
	}

	if (options->test == CPERF_TEST_TYPE_VERIFY) {
		for (i = 0; i < nb_ops; i++) {
			uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
					uint8_t *, iv_offset);

			/*
			 * If doing AES-CCM, nonce is copied one byte
			 * after the start of IV field, and AAD is copied
			 * 18 bytes after the start of the AAD field.
			 */
			if (options->aead_algo == RTE_CRYPTO_AEAD_AES_CCM) {
				memcpy(iv_ptr + 1, test_vector->aead_iv.data,
					test_vector->aead_iv.length);

				memcpy(ops[i]->sym->aead.aad.data + 18,
					test_vector->aad.data,
					test_vector->aad.length);
			} else {
				memcpy(iv_ptr, test_vector->aead_iv.data,
					test_vector->aead_iv.length);

				memcpy(ops[i]->sym->aead.aad.data,
					test_vector->aad.data,
					test_vector->aad.length);
			}
		}
	}

	return 0;
}

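/*
 * Create a lookaside-protocol IPsec session (ESP, tunnel mode) from
 * the test options: either a single AEAD transform or a cipher+auth
 * chain. Returns NULL if neither combination is configured.
 */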
static struct rte_cryptodev_sym_session *
create_ipsec_session(struct rte_mempool *sess_mp,
		struct rte_mempool *priv_mp,
		uint8_t dev_id,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset)
{
	struct rte_crypto_sym_xform xform = {0};
	struct rte_crypto_sym_xform auth_xform = {0};

	if (options->aead_algo != 0) {
		/* Setup AEAD Parameters */
		xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
		xform.next = NULL;
		xform.aead.algo = options->aead_algo;
		xform.aead.op = options->aead_op;
		xform.aead.iv.offset = iv_offset;
		xform.aead.key.data = test_vector->aead_key.data;
		xform.aead.key.length = test_vector->aead_key.length;
		xform.aead.iv.length = test_vector->aead_iv.length;
		xform.aead.digest_length = options->digest_sz;
		xform.aead.aad_length = options->aead_aad_sz;
	} else if (options->cipher_algo != 0 && options->auth_algo != 0) {
		/* Setup Cipher Parameters */
		xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		xform.next = NULL;
		xform.cipher.algo = options->cipher_algo;
		xform.cipher.op = options->cipher_op;
		xform.cipher.iv.offset = iv_offset;
		xform.cipher.iv.length = test_vector->cipher_iv.length;
		/* cipher different from null */
		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			xform.cipher.key.data = test_vector->cipher_key.data;
			xform.cipher.key.length =
				test_vector->cipher_key.length;
		} else {
			xform.cipher.key.data = NULL;
			xform.cipher.key.length = 0;
		}

		/* Setup Auth Parameters */
		auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
		auth_xform.next = NULL;
		auth_xform.auth.algo = options->auth_algo;
		auth_xform.auth.op = options->auth_op;
		auth_xform.auth.iv.offset = iv_offset +
				xform.cipher.iv.length;
		/* auth different from null */
		if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			auth_xform.auth.digest_length = options->digest_sz;
			auth_xform.auth.key.length =
						test_vector->auth_key.length;
			auth_xform.auth.key.data = test_vector->auth_key.data;
			auth_xform.auth.iv.length = test_vector->auth_iv.length;
		} else {
			auth_xform.auth.digest_length = 0;
			auth_xform.auth.key.length = 0;
			auth_xform.auth.key.data = NULL;
			auth_xform.auth.iv.length = 0;
		}

		xform.next = &auth_xform;
	} else {
		return NULL;
	}

#define CPERF_IPSEC_SRC_IP	0x01010101
#define CPERF_IPSEC_DST_IP	0x02020202
#define CPERF_IPSEC_SALT	0x0
#define CPERF_IPSEC_DEFTTL	64
	struct rte_security_ipsec_tunnel_param tunnel = {
		.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4,
		{.ipv4 = {
			.src_ip = { .s_addr = CPERF_IPSEC_SRC_IP},
			.dst_ip = { .s_addr = CPERF_IPSEC_DST_IP},
			.dscp = 0,
			.df = 0,
			.ttl = CPERF_IPSEC_DEFTTL,
		} },
	};
	struct rte_security_session_conf sess_conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		{.ipsec = {
			.spi = rte_lcore_id(),
			/* For testing's sake, the lcore_id is used as the SPI
			 * so that a different session is created for every
			 * core.
			 */
			.salt = CPERF_IPSEC_SALT,
			.options = { 0 },
			.replay_win_sz = 0,
			.direction =
				((options->cipher_op ==
					RTE_CRYPTO_CIPHER_OP_ENCRYPT) &&
				(options->auth_op ==
					RTE_CRYPTO_AUTH_OP_GENERATE)) ||
				(options->aead_op ==
					RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
				RTE_SECURITY_IPSEC_SA_DIR_EGRESS :
				RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.tunnel = tunnel,
		} },
		.userdata = NULL,
		.crypto_xform = &xform
	};

	struct rte_security_ctx *ctx = (struct rte_security_ctx *)
				rte_cryptodev_get_sec_ctx(dev_id);

	/* Create security session */
	return (void *)rte_security_session_create(ctx,
				&sess_conf, sess_mp, priv_mp);
}

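/*
 * Create the session matching the configured op type: an asymmetric
 * session for modexp, a security session for PDCP/IPsec/DOCSIS, or a
 * symmetric crypto session (cipher and/or auth, or AEAD) otherwise.
 */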
static struct rte_cryptodev_sym_session *
cperf_create_session(struct rte_mempool *sess_mp,
	struct rte_mempool *priv_mp,
	uint8_t dev_id,
	const struct cperf_options *options,
	const struct cperf_test_vector *test_vector,
	uint16_t iv_offset)
{
	struct rte_crypto_sym_xform cipher_xform;
	struct rte_crypto_sym_xform auth_xform;
	struct rte_crypto_sym_xform aead_xform;
	struct rte_cryptodev_sym_session *sess = NULL;
	struct rte_crypto_asym_xform xform = {0};
	int rc;

	if (options->op_type == CPERF_ASYM_MODEX) {
		xform.next = NULL;
		xform.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX;
		xform.modex.modulus.data = perf_mod_p;
		xform.modex.modulus.length = sizeof(perf_mod_p);
		xform.modex.exponent.data = perf_mod_e;
		xform.modex.exponent.length = sizeof(perf_mod_e);

		sess = (void *)rte_cryptodev_asym_session_create(sess_mp);
		if (sess == NULL)
			return NULL;
		rc = rte_cryptodev_asym_session_init(dev_id, (void *)sess,
						     &xform, priv_mp);
		if (rc < 0) {
			rte_cryptodev_asym_session_clear(dev_id, (void *)sess);
			rte_cryptodev_asym_session_free((void *)sess);
			return NULL;
		}
		return sess;
	}
#ifdef RTE_LIB_SECURITY
	/*
	 * security only
	 */
	if (options->op_type == CPERF_PDCP) {
		/* Setup Cipher Parameters */
		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		cipher_xform.next = NULL;
		cipher_xform.cipher.algo = options->cipher_algo;
		cipher_xform.cipher.op = options->cipher_op;
		cipher_xform.cipher.iv.offset = iv_offset;
		cipher_xform.cipher.iv.length = 4;

		/* cipher different from null */
		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			cipher_xform.cipher.key.data = test_vector->cipher_key.data;
			cipher_xform.cipher.key.length = test_vector->cipher_key.length;
		} else {
			cipher_xform.cipher.key.data = NULL;
			cipher_xform.cipher.key.length = 0;
		}

		/* Setup Auth Parameters */
		if (options->auth_algo != 0) {
			auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
			auth_xform.next = NULL;
			auth_xform.auth.algo = options->auth_algo;
			auth_xform.auth.op = options->auth_op;
			auth_xform.auth.iv.offset = iv_offset +
				cipher_xform.cipher.iv.length;

			/* auth different from null */
			if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
				auth_xform.auth.digest_length = options->digest_sz;
				auth_xform.auth.key.length = test_vector->auth_key.length;
				auth_xform.auth.key.data = test_vector->auth_key.data;
				auth_xform.auth.iv.length = test_vector->auth_iv.length;
			} else {
				auth_xform.auth.digest_length = 0;
				auth_xform.auth.key.length = 0;
				auth_xform.auth.key.data = NULL;
				auth_xform.auth.iv.length = 0;
			}

			cipher_xform.next = &auth_xform;
		} else {
			cipher_xform.next = NULL;
		}

		struct rte_security_session_conf sess_conf = {
			.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
			.protocol = RTE_SECURITY_PROTOCOL_PDCP,
			{.pdcp = {
				.bearer = 0x16,
				.domain = options->pdcp_domain,
				.pkt_dir = 0,
				.sn_size = options->pdcp_sn_sz,
				.hfn = options->pdcp_ses_hfn_en ?
					PDCP_DEFAULT_HFN : 0,
				.hfn_threshold = 0x70C0A,
				.hfn_ovrd = !(options->pdcp_ses_hfn_en),
			} },
			.crypto_xform = &cipher_xform
		};

		struct rte_security_ctx *ctx = (struct rte_security_ctx *)
					rte_cryptodev_get_sec_ctx(dev_id);

		/* Create security session */
		return (void *)rte_security_session_create(ctx,
					&sess_conf, sess_mp, priv_mp);
	}

	if (options->op_type == CPERF_IPSEC) {
		return create_ipsec_session(sess_mp, priv_mp, dev_id,
				options, test_vector, iv_offset);
	}

	if (options->op_type == CPERF_DOCSIS) {
		enum rte_security_docsis_direction direction;

		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		cipher_xform.next = NULL;
		cipher_xform.cipher.algo = options->cipher_algo;
		cipher_xform.cipher.op = options->cipher_op;
		cipher_xform.cipher.iv.offset = iv_offset;
		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			cipher_xform.cipher.key.data =
				test_vector->cipher_key.data;
			cipher_xform.cipher.key.length =
				test_vector->cipher_key.length;
			cipher_xform.cipher.iv.length =
				test_vector->cipher_iv.length;
		} else {
			cipher_xform.cipher.key.data = NULL;
			cipher_xform.cipher.key.length = 0;
			cipher_xform.cipher.iv.length = 0;
		}
		cipher_xform.next = NULL;

		if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
			direction = RTE_SECURITY_DOCSIS_DOWNLINK;
		else
			direction = RTE_SECURITY_DOCSIS_UPLINK;

		struct rte_security_session_conf sess_conf = {
			.action_type =
				RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
			.protocol = RTE_SECURITY_PROTOCOL_DOCSIS,
			{.docsis = {
				.direction = direction,
			} },
			.crypto_xform = &cipher_xform
		};
		struct rte_security_ctx *ctx = (struct rte_security_ctx *)
					rte_cryptodev_get_sec_ctx(dev_id);

		/* Create security session */
		return (void *)rte_security_session_create(ctx,
					&sess_conf, sess_mp, priv_mp);
	}
#endif
	sess = rte_cryptodev_sym_session_create(sess_mp);
	if (sess == NULL)
		return NULL;
	/*
	 * cipher only
	 */
	if (options->op_type == CPERF_CIPHER_ONLY) {
		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		cipher_xform.next = NULL;
		cipher_xform.cipher.algo = options->cipher_algo;
		cipher_xform.cipher.op = options->cipher_op;
		cipher_xform.cipher.iv.offset = iv_offset;

		/* cipher different from null */
		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			cipher_xform.cipher.key.data =
					test_vector->cipher_key.data;
			cipher_xform.cipher.key.length =
					test_vector->cipher_key.length;
			cipher_xform.cipher.iv.length =
					test_vector->cipher_iv.length;
		} else {
			cipher_xform.cipher.key.data = NULL;
			cipher_xform.cipher.key.length = 0;
			cipher_xform.cipher.iv.length = 0;
		}
		/* create crypto session */
		rte_cryptodev_sym_session_init(dev_id, sess, &cipher_xform,
				priv_mp);
	/*
	 * auth only
	 */
	} else if (options->op_type == CPERF_AUTH_ONLY) {
		auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
		auth_xform.next = NULL;
		auth_xform.auth.algo = options->auth_algo;
		auth_xform.auth.op = options->auth_op;
		auth_xform.auth.iv.offset = iv_offset;

		/* auth different from null */
		if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			auth_xform.auth.digest_length =
					options->digest_sz;
			auth_xform.auth.key.length =
					test_vector->auth_key.length;
			auth_xform.auth.key.data = test_vector->auth_key.data;
			auth_xform.auth.iv.length =
					test_vector->auth_iv.length;
		} else {
			auth_xform.auth.digest_length = 0;
			auth_xform.auth.key.length = 0;
			auth_xform.auth.key.data = NULL;
			auth_xform.auth.iv.length = 0;
		}
		/* create crypto session */
		rte_cryptodev_sym_session_init(dev_id, sess, &auth_xform,
				priv_mp);
	/*
	 * cipher and auth
	 */
	} else if (options->op_type == CPERF_CIPHER_THEN_AUTH
			|| options->op_type == CPERF_AUTH_THEN_CIPHER) {
		/*
		 * cipher
		 */
		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		cipher_xform.next = NULL;
		cipher_xform.cipher.algo = options->cipher_algo;
		cipher_xform.cipher.op = options->cipher_op;
		cipher_xform.cipher.iv.offset = iv_offset;

		/* cipher different from null */
		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			cipher_xform.cipher.key.data =
					test_vector->cipher_key.data;
			cipher_xform.cipher.key.length =
					test_vector->cipher_key.length;
			cipher_xform.cipher.iv.length =
					test_vector->cipher_iv.length;
		} else {
			cipher_xform.cipher.key.data = NULL;
			cipher_xform.cipher.key.length = 0;
			cipher_xform.cipher.iv.length = 0;
		}

		/*
		 * auth
		 */
		auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
		auth_xform.next = NULL;
		auth_xform.auth.algo = options->auth_algo;
		auth_xform.auth.op = options->auth_op;
		auth_xform.auth.iv.offset = iv_offset +
			cipher_xform.cipher.iv.length;

		/* auth different from null */
		if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			auth_xform.auth.digest_length = options->digest_sz;
			auth_xform.auth.iv.length = test_vector->auth_iv.length;
			auth_xform.auth.key.length =
					test_vector->auth_key.length;
			auth_xform.auth.key.data =
					test_vector->auth_key.data;
		} else {
			auth_xform.auth.digest_length = 0;
			auth_xform.auth.key.length = 0;
			auth_xform.auth.key.data = NULL;
			auth_xform.auth.iv.length = 0;
		}

		/* cipher then auth */
		if (options->op_type == CPERF_CIPHER_THEN_AUTH) {
			cipher_xform.next = &auth_xform;
			/* create crypto session */
			rte_cryptodev_sym_session_init(dev_id,
					sess, &cipher_xform, priv_mp);
		} else { /* auth then cipher */
			auth_xform.next = &cipher_xform;
			/* create crypto session */
			rte_cryptodev_sym_session_init(dev_id,
					sess, &auth_xform, priv_mp);
		}
	} else { /* options->op_type == CPERF_AEAD */
		aead_xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
		aead_xform.next = NULL;
		aead_xform.aead.algo = options->aead_algo;
		aead_xform.aead.op = options->aead_op;
		aead_xform.aead.iv.offset = iv_offset;

		aead_xform.aead.key.data =
					test_vector->aead_key.data;
		aead_xform.aead.key.length =
					test_vector->aead_key.length;
		aead_xform.aead.iv.length = test_vector->aead_iv.length;

		aead_xform.aead.digest_length = options->digest_sz;
		aead_xform.aead.aad_length =
					options->aead_aad_sz;

		/* Create crypto session */
		rte_cryptodev_sym_session_init(dev_id,
					sess, &aead_xform, priv_mp);
	}

	return sess;
}

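/*
 * Select the session-create and op-populate callbacks for the
 * configured operation type; returns -1 for an unknown op type.
 */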
int
cperf_get_op_functions(const struct cperf_options *options,
		struct cperf_op_fns *op_fns)
{
	memset(op_fns, 0, sizeof(struct cperf_op_fns));

	op_fns->sess_create = cperf_create_session;

	switch (options->op_type) {
	case CPERF_AEAD:
		op_fns->populate_ops = cperf_set_ops_aead;
		break;
	case CPERF_AUTH_THEN_CIPHER:
	case CPERF_CIPHER_THEN_AUTH:
		op_fns->populate_ops = cperf_set_ops_cipher_auth;
		break;
	case CPERF_AUTH_ONLY:
		if (options->auth_algo == RTE_CRYPTO_AUTH_NULL)
			op_fns->populate_ops = cperf_set_ops_null_auth;
		else
			op_fns->populate_ops = cperf_set_ops_auth;
		break;
	case CPERF_CIPHER_ONLY:
		if (options->cipher_algo == RTE_CRYPTO_CIPHER_NULL)
			op_fns->populate_ops = cperf_set_ops_null_cipher;
		else
			op_fns->populate_ops = cperf_set_ops_cipher;
		break;
	case CPERF_ASYM_MODEX:
		op_fns->populate_ops = cperf_set_ops_asym;
		break;
#ifdef RTE_LIB_SECURITY
	case CPERF_PDCP:
	case CPERF_IPSEC:
	case CPERF_DOCSIS:
		op_fns->populate_ops = cperf_set_ops_security;
		break;
#endif
	default:
		return -1;
	}

	return 0;
}