xref: /dpdk/app/test-crypto-perf/cperf_ops.c (revision 54140461b60485941da282d8da2db2f2bc19e281)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2017 Intel Corporation
3  */
4 
5 #include <rte_cryptodev.h>
6 #include <rte_ether.h>
7 #include <rte_ip.h>
8 
9 #include "cperf_ops.h"
10 #include "cperf_test_vectors.h"
11 
12 static void
13 cperf_set_ops_asym(struct rte_crypto_op **ops,
14 		   uint32_t src_buf_offset __rte_unused,
15 		   uint32_t dst_buf_offset __rte_unused, uint16_t nb_ops,
16 		   void *sess,
17 		   const struct cperf_options *options,
18 		   const struct cperf_test_vector *test_vector __rte_unused,
19 		   uint16_t iv_offset __rte_unused,
20 		   uint32_t *imix_idx __rte_unused,
21 		   uint64_t *tsc_start __rte_unused)
22 {
23 	uint16_t i;
24 	void *asym_sess = (void *)sess;
25 
26 	for (i = 0; i < nb_ops; i++) {
27 		struct rte_crypto_asym_op *asym_op = ops[i]->asym;
28 
29 		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
30 		asym_op->modex.base.data = options->modex_data->base.data;
31 		asym_op->modex.base.length = options->modex_data->base.len;
32 		asym_op->modex.result.data = options->modex_data->result.data;
33 		asym_op->modex.result.length = options->modex_data->result.len;
34 		rte_crypto_op_attach_asym_session(ops[i], asym_sess);
35 	}
36 }
37 
38 #ifdef RTE_LIB_SECURITY
39 static void
40 test_ipsec_vec_populate(struct rte_mbuf *m, const struct cperf_options *options,
41 			const struct cperf_test_vector *test_vector)
42 {
43 	struct rte_ipv4_hdr *ip = rte_pktmbuf_mtod(m, struct rte_ipv4_hdr *);
44 
45 	if (options->is_outbound) {
46 		memcpy(ip, test_vector->plaintext.data,
47 		       sizeof(struct rte_ipv4_hdr));
48 
49 		ip->total_length = rte_cpu_to_be_16(m->data_len);
50 	}
51 }
52 
53 static void
54 cperf_set_ops_security(struct rte_crypto_op **ops,
55 		uint32_t src_buf_offset __rte_unused,
56 		uint32_t dst_buf_offset __rte_unused,
57 		uint16_t nb_ops, void *sess,
58 		const struct cperf_options *options,
59 		const struct cperf_test_vector *test_vector,
60 		uint16_t iv_offset __rte_unused, uint32_t *imix_idx,
61 		uint64_t *tsc_start)
62 {
63 	uint16_t i;
64 
65 	for (i = 0; i < nb_ops; i++) {
66 		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
67 		void *sec_sess = (void *)sess;
68 		uint32_t buf_sz;
69 
70 		uint32_t *per_pkt_hfn = rte_crypto_op_ctod_offset(ops[i],
71 					uint32_t *, iv_offset);
72 		*per_pkt_hfn = options->pdcp_ses_hfn_en ? 0 : PDCP_DEFAULT_HFN;
73 
74 		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
75 		rte_security_attach_session(ops[i], sec_sess);
76 		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
77 							src_buf_offset);
78 
79 		if (options->op_type == CPERF_PDCP) {
80 			sym_op->m_src->buf_len = options->segment_sz;
81 			sym_op->m_src->data_len = options->test_buffer_size;
82 			sym_op->m_src->pkt_len = sym_op->m_src->data_len;
83 		}
84 
85 		if (options->op_type == CPERF_DOCSIS) {
86 			if (options->imix_distribution_count) {
87 				buf_sz = options->imix_buffer_sizes[*imix_idx];
88 				*imix_idx = (*imix_idx + 1) % options->pool_sz;
89 			} else
90 				buf_sz = options->test_buffer_size;
91 
92 			sym_op->m_src->buf_len = options->segment_sz;
93 			sym_op->m_src->data_len = buf_sz;
94 			sym_op->m_src->pkt_len = buf_sz;
95 
96 			/* DOCSIS header is not CRC'ed */
97 			sym_op->auth.data.offset = options->docsis_hdr_sz;
98 			sym_op->auth.data.length = buf_sz -
99 				sym_op->auth.data.offset - RTE_ETHER_CRC_LEN;
100 			/*
101 			 * DOCSIS header and SRC and DST MAC addresses are not
102 			 * ciphered
103 			 */
104 			sym_op->cipher.data.offset = sym_op->auth.data.offset +
105 				RTE_ETHER_HDR_LEN - RTE_ETHER_TYPE_LEN;
106 			sym_op->cipher.data.length = buf_sz -
107 				sym_op->cipher.data.offset;
108 		}
109 
110 		/* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
111 		if (dst_buf_offset == 0)
112 			sym_op->m_dst = NULL;
113 		else
114 			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
115 							dst_buf_offset);
116 	}
117 
118 	RTE_SET_USED(tsc_start);
119 	RTE_SET_USED(test_vector);
120 }
121 
122 static void
123 cperf_set_ops_security_ipsec(struct rte_crypto_op **ops,
124 		uint32_t src_buf_offset __rte_unused,
125 		uint32_t dst_buf_offset __rte_unused,
126 		uint16_t nb_ops, void *sess,
127 		const struct cperf_options *options,
128 		const struct cperf_test_vector *test_vector,
129 		uint16_t iv_offset __rte_unused, uint32_t *imix_idx,
130 		uint64_t *tsc_start)
131 {
132 	void *sec_sess = sess;
133 	const uint32_t test_buffer_size = options->test_buffer_size;
134 	const uint32_t headroom_sz = options->headroom_sz;
135 	const uint32_t segment_sz = options->segment_sz;
136 	uint64_t tsc_start_temp, tsc_end_temp;
137 	uint16_t i = 0;
138 
139 	RTE_SET_USED(imix_idx);
140 
141 	for (i = 0; i < nb_ops; i++) {
142 		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
143 		struct rte_mbuf *m = sym_op->m_src;
144 
145 		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
146 		rte_security_attach_session(ops[i], sec_sess);
147 		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
148 							src_buf_offset);
149 
150 		/* In case of IPsec, headroom is consumed by PMD,
151 		 * hence resetting it.
152 		 */
153 		m->data_off = headroom_sz;
154 
155 		m->buf_len = segment_sz;
156 		m->data_len = test_buffer_size;
157 		m->pkt_len = test_buffer_size;
158 
159 		sym_op->m_dst = NULL;
160 	}
161 
162 	if (options->test_file != NULL)
163 		return;
164 
165 	tsc_start_temp = rte_rdtsc_precise();
166 
167 	for (i = 0; i < nb_ops; i++) {
168 		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
169 		struct rte_mbuf *m = sym_op->m_src;
170 
171 		test_ipsec_vec_populate(m, options, test_vector);
172 	}
173 
174 	tsc_end_temp = rte_rdtsc_precise();
175 	*tsc_start += tsc_end_temp - tsc_start_temp;
176 }
177 
178 #endif
179 
180 static void
181 cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
182 		uint32_t src_buf_offset, uint32_t dst_buf_offset,
183 		uint16_t nb_ops, void *sess,
184 		const struct cperf_options *options,
185 		const struct cperf_test_vector *test_vector __rte_unused,
186 		uint16_t iv_offset __rte_unused, uint32_t *imix_idx,
187 		uint64_t *tsc_start __rte_unused)
188 {
189 	uint16_t i;
190 
191 	for (i = 0; i < nb_ops; i++) {
192 		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
193 
194 		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
195 		rte_crypto_op_attach_sym_session(ops[i], sess);
196 
197 		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
198 							src_buf_offset);
199 
200 		/* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
201 		if (dst_buf_offset == 0)
202 			sym_op->m_dst = NULL;
203 		else
204 			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
205 							dst_buf_offset);
206 
207 		/* cipher parameters */
208 		if (options->imix_distribution_count) {
209 			sym_op->cipher.data.length =
210 				options->imix_buffer_sizes[*imix_idx];
211 			*imix_idx = (*imix_idx + 1) % options->pool_sz;
212 		} else
213 			sym_op->cipher.data.length = options->test_buffer_size;
214 		sym_op->cipher.data.offset = 0;
215 	}
216 }
217 
218 static void
219 cperf_set_ops_null_auth(struct rte_crypto_op **ops,
220 		uint32_t src_buf_offset, uint32_t dst_buf_offset,
221 		uint16_t nb_ops, void *sess,
222 		const struct cperf_options *options,
223 		const struct cperf_test_vector *test_vector __rte_unused,
224 		uint16_t iv_offset __rte_unused, uint32_t *imix_idx,
225 		uint64_t *tsc_start __rte_unused)
226 {
227 	uint16_t i;
228 
229 	for (i = 0; i < nb_ops; i++) {
230 		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
231 
232 		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
233 		rte_crypto_op_attach_sym_session(ops[i], sess);
234 
235 		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
236 							src_buf_offset);
237 
238 		/* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
239 		if (dst_buf_offset == 0)
240 			sym_op->m_dst = NULL;
241 		else
242 			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
243 							dst_buf_offset);
244 
245 		/* auth parameters */
246 		if (options->imix_distribution_count) {
247 			sym_op->auth.data.length =
248 				options->imix_buffer_sizes[*imix_idx];
249 			*imix_idx = (*imix_idx + 1) % options->pool_sz;
250 		} else
251 			sym_op->auth.data.length = options->test_buffer_size;
252 		sym_op->auth.data.offset = 0;
253 	}
254 }
255 
256 static void
257 cperf_set_ops_cipher(struct rte_crypto_op **ops,
258 		uint32_t src_buf_offset, uint32_t dst_buf_offset,
259 		uint16_t nb_ops, void *sess,
260 		const struct cperf_options *options,
261 		const struct cperf_test_vector *test_vector,
262 		uint16_t iv_offset, uint32_t *imix_idx,
263 		uint64_t *tsc_start __rte_unused)
264 {
265 	uint16_t i;
266 
267 	for (i = 0; i < nb_ops; i++) {
268 		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
269 
270 		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
271 		rte_crypto_op_attach_sym_session(ops[i], sess);
272 
273 		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
274 							src_buf_offset);
275 
276 		/* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
277 		if (dst_buf_offset == 0)
278 			sym_op->m_dst = NULL;
279 		else
280 			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
281 							dst_buf_offset);
282 
283 		/* cipher parameters */
284 		if (options->imix_distribution_count) {
285 			sym_op->cipher.data.length =
286 				options->imix_buffer_sizes[*imix_idx];
287 			*imix_idx = (*imix_idx + 1) % options->pool_sz;
288 		} else
289 			sym_op->cipher.data.length = options->test_buffer_size;
290 
291 		if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
292 				options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
293 				options->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3)
294 			sym_op->cipher.data.length <<= 3;
295 
296 		sym_op->cipher.data.offset = 0;
297 	}
298 
299 	if (options->test == CPERF_TEST_TYPE_VERIFY) {
300 		for (i = 0; i < nb_ops; i++) {
301 			uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
302 					uint8_t *, iv_offset);
303 
304 			memcpy(iv_ptr, test_vector->cipher_iv.data,
305 					test_vector->cipher_iv.length);
306 
307 		}
308 	}
309 }
310 
311 static void
312 cperf_set_ops_auth(struct rte_crypto_op **ops,
313 		uint32_t src_buf_offset, uint32_t dst_buf_offset,
314 		uint16_t nb_ops, void *sess,
315 		const struct cperf_options *options,
316 		const struct cperf_test_vector *test_vector,
317 		uint16_t iv_offset, uint32_t *imix_idx,
318 		uint64_t *tsc_start __rte_unused)
319 {
320 	uint16_t i;
321 
322 	for (i = 0; i < nb_ops; i++) {
323 		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
324 
325 		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
326 		rte_crypto_op_attach_sym_session(ops[i], sess);
327 
328 		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
329 							src_buf_offset);
330 
331 		/* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
332 		if (dst_buf_offset == 0)
333 			sym_op->m_dst = NULL;
334 		else
335 			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
336 							dst_buf_offset);
337 
338 		if (test_vector->auth_iv.length) {
339 			uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
340 								uint8_t *,
341 								iv_offset);
342 			memcpy(iv_ptr, test_vector->auth_iv.data,
343 					test_vector->auth_iv.length);
344 		}
345 
346 		/* authentication parameters */
347 		if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
348 			sym_op->auth.digest.data = test_vector->digest.data;
349 			sym_op->auth.digest.phys_addr =
350 					test_vector->digest.phys_addr;
351 		} else {
352 
353 			uint32_t offset = options->test_buffer_size;
354 			struct rte_mbuf *buf, *tbuf;
355 
356 			if (options->out_of_place) {
357 				buf = sym_op->m_dst;
358 			} else {
359 				tbuf = sym_op->m_src;
360 				while ((tbuf->next != NULL) &&
361 						(offset >= tbuf->data_len)) {
362 					offset -= tbuf->data_len;
363 					tbuf = tbuf->next;
364 				}
365 				/*
366 				 * If there is not enough room in segment,
367 				 * place the digest in the next segment
368 				 */
369 				if ((tbuf->data_len - offset) < options->digest_sz) {
370 					tbuf = tbuf->next;
371 					offset = 0;
372 				}
373 				buf = tbuf;
374 			}
375 
376 			sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
377 					uint8_t *, offset);
378 			sym_op->auth.digest.phys_addr =
379 					rte_pktmbuf_iova_offset(buf, offset);
380 
381 		}
382 
383 		if (options->imix_distribution_count) {
384 			sym_op->auth.data.length =
385 				options->imix_buffer_sizes[*imix_idx];
386 			*imix_idx = (*imix_idx + 1) % options->pool_sz;
387 		} else
388 			sym_op->auth.data.length = options->test_buffer_size;
389 
390 		if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
391 				options->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 ||
392 				options->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3)
393 			sym_op->auth.data.length <<= 3;
394 
395 		sym_op->auth.data.offset = 0;
396 	}
397 
398 	if (options->test == CPERF_TEST_TYPE_VERIFY) {
399 		if (test_vector->auth_iv.length) {
400 			for (i = 0; i < nb_ops; i++) {
401 				uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
402 						uint8_t *, iv_offset);
403 
404 				memcpy(iv_ptr, test_vector->auth_iv.data,
405 						test_vector->auth_iv.length);
406 			}
407 		}
408 	}
409 }
410 
/*
 * Populate a burst of symmetric ops for chained cipher+auth tests
 * (CPERF_CIPHER_THEN_AUTH / CPERF_AUTH_THEN_CIPHER).
 *
 * Buffer wiring matches the other helpers: source mbuf at
 * src_buf_offset past the op; dst NULL when operating in place.
 * Cipher length grows by digest_sz for AUTH_THEN_CIPHER with digest
 * generation (the digest itself gets encrypted) BEFORE the bit-length
 * conversion for wireless algorithms — order matters here.
 * For VERIFY tests the cipher IV (and auth IV, when present, placed
 * right after it) are copied into each op's IV area at the end.
 */
static void
cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
		uint32_t src_buf_offset, uint32_t dst_buf_offset,
		uint16_t nb_ops, void *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset, uint32_t *imix_idx,
		uint64_t *tsc_start __rte_unused)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_crypto_op_attach_sym_session(ops[i], sess);

		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		/* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
		if (dst_buf_offset == 0)
			sym_op->m_dst = NULL;
		else
			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
							dst_buf_offset);

		/* cipher parameters */
		if (options->imix_distribution_count) {
			sym_op->cipher.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->cipher.data.length = options->test_buffer_size;

		/* Auth-then-cipher with digest generation also encrypts the
		 * appended digest, so extend the cipher region by digest_sz.
		 */
		if ((options->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE) &&
				(options->op_type == CPERF_AUTH_THEN_CIPHER))
			sym_op->cipher.data.length += options->digest_sz;

		/* SNOW3G/KASUMI/ZUC cipher lengths are in bits. */
		if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
				options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
				options->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3)
			sym_op->cipher.data.length <<= 3;

		sym_op->cipher.data.offset = 0;

		/* authentication parameters */
		if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
			sym_op->auth.digest.data = test_vector->digest.data;
			sym_op->auth.digest.phys_addr =
					test_vector->digest.phys_addr;
		} else {
			/* Generated digest is placed just past the payload;
			 * walk the segment chain to find its segment.
			 * NOTE(review): assumes the chain has room for the
			 * digest — a too-short chain would leave tbuf NULL.
			 */
			uint32_t offset = options->test_buffer_size;
			struct rte_mbuf *buf, *tbuf;

			if (options->out_of_place) {
				buf = sym_op->m_dst;
			} else {
				tbuf = sym_op->m_src;
				while ((tbuf->next != NULL) &&
						(offset >= tbuf->data_len)) {
					offset -= tbuf->data_len;
					tbuf = tbuf->next;
				}
				/*
				 * If there is not enough room in segment,
				 * place the digest in the next segment
				 */
				if ((tbuf->data_len - offset) < options->digest_sz) {
					tbuf = tbuf->next;
					offset = 0;
				}
				buf = tbuf;
			}

			sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
					uint8_t *, offset);
			sym_op->auth.digest.phys_addr =
					rte_pktmbuf_iova_offset(buf, offset);
		}

		if (options->imix_distribution_count) {
			sym_op->auth.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->auth.data.length = options->test_buffer_size;

		/* SNOW3G/KASUMI/ZUC auth lengths are in bits. */
		if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
				options->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 ||
				options->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3)
			sym_op->auth.data.length <<= 3;

		sym_op->auth.data.offset = 0;
	}

	if (options->test == CPERF_TEST_TYPE_VERIFY) {
		for (i = 0; i < nb_ops; i++) {
			uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
					uint8_t *, iv_offset);

			memcpy(iv_ptr, test_vector->cipher_iv.data,
					test_vector->cipher_iv.length);
			if (test_vector->auth_iv.length) {
				/*
				 * Copy IV after the crypto operation and
				 * the cipher IV
				 */
				iv_ptr += test_vector->cipher_iv.length;
				memcpy(iv_ptr, test_vector->auth_iv.data,
						test_vector->auth_iv.length);
			}
		}

	}
}
528 
/*
 * Populate a burst of symmetric ops for an AEAD test.
 *
 * The AAD is stored in the op's private area right after the IV,
 * 16-byte aligned (aad_offset).  For decrypt the digest comes from the
 * test vector; for encrypt it is placed just past the AEAD region in
 * the (possibly segmented) mbuf chain.  VERIFY and LATENCY tests also
 * copy the IV and AAD contents, honoring AES-CCM's layout quirks
 * (nonce at IV+1, AAD at +18).
 */
static void
cperf_set_ops_aead(struct rte_crypto_op **ops,
		uint32_t src_buf_offset, uint32_t dst_buf_offset,
		uint16_t nb_ops, void *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset, uint32_t *imix_idx,
		uint64_t *tsc_start __rte_unused)
{
	uint16_t i;
	/* AAD is placed after the IV */
	uint16_t aad_offset = iv_offset +
			RTE_ALIGN_CEIL(test_vector->aead_iv.length, 16);

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_crypto_op_attach_sym_session(ops[i], sess);

		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		/* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
		if (dst_buf_offset == 0)
			sym_op->m_dst = NULL;
		else
			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
							dst_buf_offset);

		/* AEAD parameters */
		if (options->imix_distribution_count) {
			sym_op->aead.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->aead.data.length = options->test_buffer_size;
		sym_op->aead.data.offset = 0;

		/* AAD lives in the op itself, at aad_offset past the op. */
		sym_op->aead.aad.data = rte_crypto_op_ctod_offset(ops[i],
					uint8_t *, aad_offset);
		sym_op->aead.aad.phys_addr = rte_crypto_op_ctophys_offset(ops[i],
					aad_offset);

		if (options->aead_op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
			sym_op->aead.digest.data = test_vector->digest.data;
			sym_op->aead.digest.phys_addr =
					test_vector->digest.phys_addr;
		} else {
			/* Generated digest goes just past the AEAD region;
			 * walk the segment chain to find its segment.
			 * NOTE(review): assumes the chain has room for the
			 * digest — a too-short chain would leave tbuf NULL.
			 */
			uint32_t offset = sym_op->aead.data.length +
						sym_op->aead.data.offset;
			struct rte_mbuf *buf, *tbuf;

			if (options->out_of_place) {
				buf = sym_op->m_dst;
			} else {
				tbuf = sym_op->m_src;
				while ((tbuf->next != NULL) &&
						(offset >= tbuf->data_len)) {
					offset -= tbuf->data_len;
					tbuf = tbuf->next;
				}
				/*
				 * If there is not enough room in segment,
				 * place the digest in the next segment
				 */
				if ((tbuf->data_len - offset) < options->digest_sz) {
					tbuf = tbuf->next;
					offset = 0;
				}
				buf = tbuf;
			}

			sym_op->aead.digest.data = rte_pktmbuf_mtod_offset(buf,
					uint8_t *, offset);
			sym_op->aead.digest.phys_addr =
					rte_pktmbuf_iova_offset(buf, offset);
		}
	}

	if ((options->test == CPERF_TEST_TYPE_VERIFY) ||
			(options->test == CPERF_TEST_TYPE_LATENCY)) {
		for (i = 0; i < nb_ops; i++) {
			uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
					uint8_t *, iv_offset);

			/*
			 * If doing AES-CCM, nonce is copied one byte
			 * after the start of IV field, and AAD is copied
			 * 18 bytes after the start of the AAD field.
			 */
			if (options->aead_algo == RTE_CRYPTO_AEAD_AES_CCM) {
				memcpy(iv_ptr + 1, test_vector->aead_iv.data,
					test_vector->aead_iv.length);

				memcpy(ops[i]->sym->aead.aad.data + 18,
					test_vector->aad.data,
					test_vector->aad.length);
			} else {
				memcpy(iv_ptr, test_vector->aead_iv.data,
					test_vector->aead_iv.length);

				memcpy(ops[i]->sym->aead.aad.data,
					test_vector->aad.data,
					test_vector->aad.length);
			}
		}
	}
}
639 
/*
 * Create a lookaside IPsec security session on dev_id.
 *
 * Builds the crypto transform chain from the test options: a single
 * AEAD xform when an AEAD algorithm is configured, otherwise a
 * cipher+auth pair ordered cipher->auth for outbound and auth->cipher
 * for inbound.  Returns the security session pointer, or NULL when no
 * valid algorithm combination is configured (or session creation
 * fails).  The SA is a fixed ESP IPv4 tunnel; the SPI is derived from
 * the lcore id so each core gets a distinct session.
 */
static void *
create_ipsec_session(struct rte_mempool *sess_mp,
		uint8_t dev_id,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset)
{
	struct rte_crypto_sym_xform auth_xform = {0};
	struct rte_crypto_sym_xform *crypto_xform;
	struct rte_crypto_sym_xform xform = {0};

	if (options->aead_algo != 0) {
		/* Setup AEAD Parameters */
		xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
		xform.next = NULL;
		xform.aead.algo = options->aead_algo;
		xform.aead.op = options->aead_op;
		xform.aead.iv.offset = iv_offset;
		xform.aead.key.data = test_vector->aead_key.data;
		xform.aead.key.length = test_vector->aead_key.length;
		xform.aead.iv.length = test_vector->aead_iv.length;
		xform.aead.digest_length = options->digest_sz;
		xform.aead.aad_length = options->aead_aad_sz;
		crypto_xform = &xform;
	} else if (options->cipher_algo != 0 && options->auth_algo != 0) {
		/* Setup Cipher Parameters */
		xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		xform.cipher.algo = options->cipher_algo;
		xform.cipher.op = options->cipher_op;
		xform.cipher.iv.offset = iv_offset;
		xform.cipher.iv.length = test_vector->cipher_iv.length;
		/* cipher different than null */
		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			xform.cipher.key.data = test_vector->cipher_key.data;
			xform.cipher.key.length =
				test_vector->cipher_key.length;
		} else {
			xform.cipher.key.data = NULL;
			xform.cipher.key.length = 0;
		}

		/* Setup Auth Parameters */
		auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
		auth_xform.auth.algo = options->auth_algo;
		auth_xform.auth.op = options->auth_op;
		/* Auth IV sits right after the cipher IV in the op. */
		auth_xform.auth.iv.offset = iv_offset +
				xform.cipher.iv.length;
		/* auth different than null */
		if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			auth_xform.auth.digest_length = options->digest_sz;
			auth_xform.auth.key.length =
						test_vector->auth_key.length;
			auth_xform.auth.key.data = test_vector->auth_key.data;
			auth_xform.auth.iv.length = test_vector->auth_iv.length;
		} else {
			auth_xform.auth.digest_length = 0;
			auth_xform.auth.key.length = 0;
			auth_xform.auth.key.data = NULL;
			auth_xform.auth.iv.length = 0;
		}

		/* Outbound encrypts then authenticates; inbound does the
		 * reverse, so the chain order flips with direction.
		 */
		if (options->is_outbound) {
			crypto_xform = &xform;
			xform.next = &auth_xform;
			auth_xform.next = NULL;
		} else {
			crypto_xform = &auth_xform;
			auth_xform.next = &xform;
			xform.next = NULL;
		}
	} else {
		/* Neither AEAD nor a cipher+auth pair configured. */
		return NULL;
	}

#define CPERF_IPSEC_SRC_IP	0x01010101
#define CPERF_IPSEC_DST_IP	0x02020202
#define CPERF_IPSEC_SALT	0x0
#define CPERF_IPSEC_DEFTTL	64
	/* Fixed IPv4 tunnel endpoints used by every perf session. */
	struct rte_security_ipsec_tunnel_param tunnel = {
		.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4,
		{.ipv4 = {
			.src_ip = { .s_addr = CPERF_IPSEC_SRC_IP},
			.dst_ip = { .s_addr = CPERF_IPSEC_DST_IP},
			.dscp = 0,
			.df = 0,
			.ttl = CPERF_IPSEC_DEFTTL,
		} },
	};
	struct rte_security_session_conf sess_conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		{.ipsec = {
			.spi = rte_lcore_id() + 1,
			/**< For testing sake, lcore_id is taken as SPI so that
			 * for every core a different session is created.
			 */
			.salt = CPERF_IPSEC_SALT,
			.options = { 0 },
			.replay_win_sz = 0,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.tunnel = tunnel,
		} },
		.userdata = NULL,
		.crypto_xform = crypto_xform,
	};

	if (options->is_outbound)
		sess_conf.ipsec.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
	else
		sess_conf.ipsec.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;

	void *ctx = rte_cryptodev_get_sec_ctx(dev_id);

	/* Create security session */
	return (void *)rte_security_session_create(ctx, &sess_conf, sess_mp);
}
757 
758 static void *
759 cperf_create_session(struct rte_mempool *sess_mp,
760 	uint8_t dev_id,
761 	const struct cperf_options *options,
762 	const struct cperf_test_vector *test_vector,
763 	uint16_t iv_offset)
764 {
765 	struct rte_crypto_sym_xform cipher_xform;
766 	struct rte_crypto_sym_xform auth_xform;
767 	struct rte_crypto_sym_xform aead_xform;
768 	void *sess = NULL;
769 	void *asym_sess = NULL;
770 	struct rte_crypto_asym_xform xform = {0};
771 	int ret;
772 
773 	if (options->op_type == CPERF_ASYM_MODEX) {
774 		xform.next = NULL;
775 		xform.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX;
776 		xform.modex.modulus.data = options->modex_data->modulus.data;
777 		xform.modex.modulus.length = options->modex_data->modulus.len;
778 		xform.modex.exponent.data = options->modex_data->exponent.data;
779 		xform.modex.exponent.length = options->modex_data->exponent.len;
780 
781 		ret = rte_cryptodev_asym_session_create(dev_id, &xform,
782 				sess_mp, &asym_sess);
783 		if (ret < 0) {
784 			RTE_LOG(ERR, USER1, "Asym session create failed\n");
785 			return NULL;
786 		}
787 		return asym_sess;
788 	}
789 #ifdef RTE_LIB_SECURITY
790 	/*
791 	 * security only
792 	 */
793 	if (options->op_type == CPERF_PDCP) {
794 		/* Setup Cipher Parameters */
795 		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
796 		cipher_xform.next = NULL;
797 		cipher_xform.cipher.algo = options->cipher_algo;
798 		cipher_xform.cipher.op = options->cipher_op;
799 		cipher_xform.cipher.iv.offset = iv_offset;
800 		cipher_xform.cipher.iv.length = 4;
801 
802 		/* cipher different than null */
803 		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
804 			cipher_xform.cipher.key.data = test_vector->cipher_key.data;
805 			cipher_xform.cipher.key.length = test_vector->cipher_key.length;
806 		} else {
807 			cipher_xform.cipher.key.data = NULL;
808 			cipher_xform.cipher.key.length = 0;
809 		}
810 
811 		/* Setup Auth Parameters */
812 		if (options->auth_algo != 0) {
813 			auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
814 			auth_xform.next = NULL;
815 			auth_xform.auth.algo = options->auth_algo;
816 			auth_xform.auth.op = options->auth_op;
817 			auth_xform.auth.iv.offset = iv_offset +
818 				cipher_xform.cipher.iv.length;
819 
820 			/* auth different than null */
821 			if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
822 				auth_xform.auth.digest_length = options->digest_sz;
823 				auth_xform.auth.key.length = test_vector->auth_key.length;
824 				auth_xform.auth.key.data = test_vector->auth_key.data;
825 				auth_xform.auth.iv.length = test_vector->auth_iv.length;
826 			} else {
827 				auth_xform.auth.digest_length = 0;
828 				auth_xform.auth.key.length = 0;
829 				auth_xform.auth.key.data = NULL;
830 				auth_xform.auth.iv.length = 0;
831 			}
832 
833 			cipher_xform.next = &auth_xform;
834 		} else {
835 			cipher_xform.next = NULL;
836 		}
837 
838 		struct rte_security_session_conf sess_conf = {
839 			.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
840 			.protocol = RTE_SECURITY_PROTOCOL_PDCP,
841 			{.pdcp = {
842 				.bearer = 0x16,
843 				.domain = options->pdcp_domain,
844 				.pkt_dir = 0,
845 				.sn_size = options->pdcp_sn_sz,
846 				.hfn = options->pdcp_ses_hfn_en ?
847 					PDCP_DEFAULT_HFN : 0,
848 				.hfn_threshold = 0x70C0A,
849 				.sdap_enabled = options->pdcp_sdap,
850 				.hfn_ovrd = !(options->pdcp_ses_hfn_en),
851 			} },
852 			.crypto_xform = &cipher_xform
853 		};
854 
855 		void *ctx = rte_cryptodev_get_sec_ctx(dev_id);
856 
857 		/* Create security session */
858 		return (void *)rte_security_session_create(ctx, &sess_conf, sess_mp);
859 	}
860 
861 	if (options->op_type == CPERF_IPSEC) {
862 		return create_ipsec_session(sess_mp, dev_id,
863 				options, test_vector, iv_offset);
864 	}
865 
866 	if (options->op_type == CPERF_DOCSIS) {
867 		enum rte_security_docsis_direction direction;
868 
869 		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
870 		cipher_xform.next = NULL;
871 		cipher_xform.cipher.algo = options->cipher_algo;
872 		cipher_xform.cipher.op = options->cipher_op;
873 		cipher_xform.cipher.iv.offset = iv_offset;
874 		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
875 			cipher_xform.cipher.key.data =
876 				test_vector->cipher_key.data;
877 			cipher_xform.cipher.key.length =
878 				test_vector->cipher_key.length;
879 			cipher_xform.cipher.iv.length =
880 				test_vector->cipher_iv.length;
881 		} else {
882 			cipher_xform.cipher.key.data = NULL;
883 			cipher_xform.cipher.key.length = 0;
884 			cipher_xform.cipher.iv.length = 0;
885 		}
886 		cipher_xform.next = NULL;
887 
888 		if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
889 			direction = RTE_SECURITY_DOCSIS_DOWNLINK;
890 		else
891 			direction = RTE_SECURITY_DOCSIS_UPLINK;
892 
893 		struct rte_security_session_conf sess_conf = {
894 			.action_type =
895 				RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
896 			.protocol = RTE_SECURITY_PROTOCOL_DOCSIS,
897 			{.docsis = {
898 				.direction = direction,
899 			} },
900 			.crypto_xform = &cipher_xform
901 		};
902 		void *ctx = rte_cryptodev_get_sec_ctx(dev_id);
903 
904 		/* Create security session */
905 		return (void *)rte_security_session_create(ctx, &sess_conf, sess_mp);
906 	}
907 #endif
908 	/*
909 	 * cipher only
910 	 */
911 	if (options->op_type == CPERF_CIPHER_ONLY) {
912 		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
913 		cipher_xform.next = NULL;
914 		cipher_xform.cipher.algo = options->cipher_algo;
915 		cipher_xform.cipher.op = options->cipher_op;
916 		cipher_xform.cipher.iv.offset = iv_offset;
917 
918 		/* cipher different than null */
919 		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
920 			cipher_xform.cipher.key.data =
921 					test_vector->cipher_key.data;
922 			cipher_xform.cipher.key.length =
923 					test_vector->cipher_key.length;
924 			cipher_xform.cipher.iv.length =
925 					test_vector->cipher_iv.length;
926 		} else {
927 			cipher_xform.cipher.key.data = NULL;
928 			cipher_xform.cipher.key.length = 0;
929 			cipher_xform.cipher.iv.length = 0;
930 		}
931 		/* create crypto session */
932 		sess = rte_cryptodev_sym_session_create(dev_id, &cipher_xform,
933 				sess_mp);
934 	/*
935 	 *  auth only
936 	 */
937 	} else if (options->op_type == CPERF_AUTH_ONLY) {
938 		auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
939 		auth_xform.next = NULL;
940 		auth_xform.auth.algo = options->auth_algo;
941 		auth_xform.auth.op = options->auth_op;
942 		auth_xform.auth.iv.offset = iv_offset;
943 
944 		/* auth different than null */
945 		if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
946 			auth_xform.auth.digest_length =
947 					options->digest_sz;
948 			auth_xform.auth.key.length =
949 					test_vector->auth_key.length;
950 			auth_xform.auth.key.data = test_vector->auth_key.data;
951 			auth_xform.auth.iv.length =
952 					test_vector->auth_iv.length;
953 		} else {
954 			auth_xform.auth.digest_length = 0;
955 			auth_xform.auth.key.length = 0;
956 			auth_xform.auth.key.data = NULL;
957 			auth_xform.auth.iv.length = 0;
958 		}
959 		/* create crypto session */
960 		sess = rte_cryptodev_sym_session_create(dev_id, &auth_xform,
961 				sess_mp);
962 	/*
963 	 * cipher and auth
964 	 */
965 	} else if (options->op_type == CPERF_CIPHER_THEN_AUTH
966 			|| options->op_type == CPERF_AUTH_THEN_CIPHER) {
967 		/*
968 		 * cipher
969 		 */
970 		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
971 		cipher_xform.next = NULL;
972 		cipher_xform.cipher.algo = options->cipher_algo;
973 		cipher_xform.cipher.op = options->cipher_op;
974 		cipher_xform.cipher.iv.offset = iv_offset;
975 
976 		/* cipher different than null */
977 		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
978 			cipher_xform.cipher.key.data =
979 					test_vector->cipher_key.data;
980 			cipher_xform.cipher.key.length =
981 					test_vector->cipher_key.length;
982 			cipher_xform.cipher.iv.length =
983 					test_vector->cipher_iv.length;
984 		} else {
985 			cipher_xform.cipher.key.data = NULL;
986 			cipher_xform.cipher.key.length = 0;
987 			cipher_xform.cipher.iv.length = 0;
988 		}
989 
990 		/*
991 		 * auth
992 		 */
993 		auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
994 		auth_xform.next = NULL;
995 		auth_xform.auth.algo = options->auth_algo;
996 		auth_xform.auth.op = options->auth_op;
997 		auth_xform.auth.iv.offset = iv_offset +
998 			cipher_xform.cipher.iv.length;
999 
1000 		/* auth different than null */
1001 		if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
1002 			auth_xform.auth.digest_length = options->digest_sz;
1003 			auth_xform.auth.iv.length = test_vector->auth_iv.length;
1004 			auth_xform.auth.key.length =
1005 					test_vector->auth_key.length;
1006 			auth_xform.auth.key.data =
1007 					test_vector->auth_key.data;
1008 		} else {
1009 			auth_xform.auth.digest_length = 0;
1010 			auth_xform.auth.key.length = 0;
1011 			auth_xform.auth.key.data = NULL;
1012 			auth_xform.auth.iv.length = 0;
1013 		}
1014 
1015 		/* cipher then auth */
1016 		if (options->op_type == CPERF_CIPHER_THEN_AUTH) {
1017 			cipher_xform.next = &auth_xform;
1018 			/* create crypto session */
1019 			sess = rte_cryptodev_sym_session_create(dev_id,
1020 					&cipher_xform, sess_mp);
1021 		} else { /* auth then cipher */
1022 			auth_xform.next = &cipher_xform;
1023 			/* create crypto session */
1024 			sess = rte_cryptodev_sym_session_create(dev_id,
1025 					&auth_xform, sess_mp);
1026 		}
1027 	} else { /* options->op_type == CPERF_AEAD */
1028 		aead_xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
1029 		aead_xform.next = NULL;
1030 		aead_xform.aead.algo = options->aead_algo;
1031 		aead_xform.aead.op = options->aead_op;
1032 		aead_xform.aead.iv.offset = iv_offset;
1033 
1034 		aead_xform.aead.key.data =
1035 					test_vector->aead_key.data;
1036 		aead_xform.aead.key.length =
1037 					test_vector->aead_key.length;
1038 		aead_xform.aead.iv.length = test_vector->aead_iv.length;
1039 
1040 		aead_xform.aead.digest_length = options->digest_sz;
1041 		aead_xform.aead.aad_length =
1042 					options->aead_aad_sz;
1043 
1044 		/* Create crypto session */
1045 		sess = rte_cryptodev_sym_session_create(dev_id, &aead_xform,
1046 				sess_mp);
1047 	}
1048 
1049 	return sess;
1050 }
1051 
1052 int
1053 cperf_get_op_functions(const struct cperf_options *options,
1054 		struct cperf_op_fns *op_fns)
1055 {
1056 	memset(op_fns, 0, sizeof(struct cperf_op_fns));
1057 
1058 	op_fns->sess_create = cperf_create_session;
1059 
1060 	switch (options->op_type) {
1061 	case CPERF_AEAD:
1062 		op_fns->populate_ops = cperf_set_ops_aead;
1063 		break;
1064 
1065 	case CPERF_AUTH_THEN_CIPHER:
1066 	case CPERF_CIPHER_THEN_AUTH:
1067 		op_fns->populate_ops = cperf_set_ops_cipher_auth;
1068 		break;
1069 	case CPERF_AUTH_ONLY:
1070 		if (options->auth_algo == RTE_CRYPTO_AUTH_NULL)
1071 			op_fns->populate_ops = cperf_set_ops_null_auth;
1072 		else
1073 			op_fns->populate_ops = cperf_set_ops_auth;
1074 		break;
1075 	case CPERF_CIPHER_ONLY:
1076 		if (options->cipher_algo == RTE_CRYPTO_CIPHER_NULL)
1077 			op_fns->populate_ops = cperf_set_ops_null_cipher;
1078 		else
1079 			op_fns->populate_ops = cperf_set_ops_cipher;
1080 		break;
1081 	case CPERF_ASYM_MODEX:
1082 		op_fns->populate_ops = cperf_set_ops_asym;
1083 		break;
1084 #ifdef RTE_LIB_SECURITY
1085 	case CPERF_PDCP:
1086 	case CPERF_DOCSIS:
1087 		op_fns->populate_ops = cperf_set_ops_security;
1088 		break;
1089 	case CPERF_IPSEC:
1090 		op_fns->populate_ops = cperf_set_ops_security_ipsec;
1091 		break;
1092 #endif
1093 	default:
1094 		return -1;
1095 	}
1096 
1097 	return 0;
1098 }
1099