/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <rte_cryptodev.h>
#include <rte_ether.h>
#include <rte_ip.h>

#include "cperf_ops.h"
#include "cperf_test_vectors.h"

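/*
 * Populate modular exponentiation (asym) operations: each op takes the
 * base and result buffers from the modex test data in the options and is
 * attached to the asymmetric session.
 */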
static void
cperf_set_ops_asym_modex(struct rte_crypto_op **ops,
		   uint32_t src_buf_offset __rte_unused,
		   uint32_t dst_buf_offset __rte_unused, uint16_t nb_ops,
		   void *sess,
		   const struct cperf_options *options,
		   const struct cperf_test_vector *test_vector __rte_unused,
		   uint16_t iv_offset __rte_unused,
		   uint32_t *imix_idx __rte_unused,
		   uint64_t *tsc_start __rte_unused)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_asym_op *asym_op = ops[i]->asym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		asym_op->modex.base.data = options->modex_data->base.data;
		asym_op->modex.base.length = options->modex_data->base.len;
		asym_op->modex.result.data = options->modex_data->result.data;
		asym_op->modex.result.length = options->modex_data->result.len;
		rte_crypto_op_attach_asym_session(ops[i], sess);
	}
}

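/*
 * Populate ECDSA (secp256r1) operations from the test data held in the
 * options: the message, the per-op scalar k and the reference (r, s)
 * signature, plus the requested asymmetric op type.
 */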
static void
cperf_set_ops_asym_ecdsa(struct rte_crypto_op **ops,
		   uint32_t src_buf_offset __rte_unused,
		   uint32_t dst_buf_offset __rte_unused, uint16_t nb_ops,
		   void *sess,
		   const struct cperf_options *options,
		   const struct cperf_test_vector *test_vector __rte_unused,
		   uint16_t iv_offset __rte_unused,
		   uint32_t *imix_idx __rte_unused,
		   uint64_t *tsc_start __rte_unused)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_asym_op *asym_op = ops[i]->asym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_crypto_op_attach_asym_session(ops[i], sess);

		asym_op->ecdsa.op_type = options->asym_op_type;
		asym_op->ecdsa.message.data = options->secp256r1_data->message.data;
		asym_op->ecdsa.message.length = options->secp256r1_data->message.length;

		asym_op->ecdsa.k.data = options->secp256r1_data->k.data;
		asym_op->ecdsa.k.length = options->secp256r1_data->k.length;

		asym_op->ecdsa.r.data = options->secp256r1_data->sign_r.data;
		asym_op->ecdsa.r.length = options->secp256r1_data->sign_r.length;
		asym_op->ecdsa.s.data = options->secp256r1_data->sign_s.data;
		asym_op->ecdsa.s.length = options->secp256r1_data->sign_s.length;
	}
}

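/*
 * Populate SM2 operations. All fields (message, cipher, id, scalar k and the
 * (r, s) signature) are filled from the SM2 test data; which of them the PMD
 * actually consumes depends on the requested asymmetric op type.
 */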
static void
cperf_set_ops_asym_sm2(struct rte_crypto_op **ops,
		   uint32_t src_buf_offset __rte_unused,
		   uint32_t dst_buf_offset __rte_unused, uint16_t nb_ops,
		   void *sess,
		   const struct cperf_options *options,
		   const struct cperf_test_vector *test_vector __rte_unused,
		   uint16_t iv_offset __rte_unused,
		   uint32_t *imix_idx __rte_unused,
		   uint64_t *tsc_start __rte_unused)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_asym_op *asym_op = ops[i]->asym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_crypto_op_attach_asym_session(ops[i], sess);

		/* Populate op with operational details */
		asym_op->sm2.hash = options->asym_hash_alg;

		asym_op->sm2.op_type = options->asym_op_type;
		asym_op->sm2.message.data = options->sm2_data->message.data;
		asym_op->sm2.message.length = options->sm2_data->message.length;
		asym_op->sm2.cipher.data = options->sm2_data->cipher.data;
		asym_op->sm2.cipher.length = options->sm2_data->cipher.length;
		asym_op->sm2.id.data = options->sm2_data->id.data;
		asym_op->sm2.id.length = options->sm2_data->id.length;

		asym_op->sm2.k.data = options->sm2_data->k.data;
		asym_op->sm2.k.length = options->sm2_data->k.length;

		asym_op->sm2.r.data = options->sm2_data->sign_r.data;
		asym_op->sm2.r.length = options->sm2_data->sign_r.length;
		asym_op->sm2.s.data = options->sm2_data->sign_s.data;
		asym_op->sm2.s.length = options->sm2_data->sign_s.length;
	}
}

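/*
 * The helpers below populate ops for rte_security sessions (PDCP, DOCSIS,
 * IPsec and TLS record) and are only compiled in when the security library
 * is enabled.
 */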
#ifdef RTE_LIB_SECURITY
static void
test_ipsec_vec_populate(struct rte_mbuf *m, const struct cperf_options *options,
			const struct cperf_test_vector *test_vector)
{
	struct rte_ipv4_hdr *ip = rte_pktmbuf_mtod(m, struct rte_ipv4_hdr *);

	if (options->is_outbound) {
		memcpy(ip, test_vector->plaintext.data, sizeof(struct rte_ipv4_hdr));
		ip->total_length = rte_cpu_to_be_16(m->pkt_len);
	}
}

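/*
 * Populate lookaside-protocol operations for PDCP and DOCSIS sessions.
 * A per-packet HFN override word is written at iv_offset (used when the
 * session-based HFN is disabled). For DOCSIS the auth region skips the
 * DOCSIS header and trailing CRC, and the cipher region additionally skips
 * the Ethernet addressing, as the offsets below encode.
 */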
static void
cperf_set_ops_security(struct rte_crypto_op **ops,
		uint32_t src_buf_offset __rte_unused,
		uint32_t dst_buf_offset __rte_unused,
		uint16_t nb_ops, void *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset __rte_unused, uint32_t *imix_idx,
		uint64_t *tsc_start)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
		uint32_t buf_sz;

		uint32_t *per_pkt_hfn = rte_crypto_op_ctod_offset(ops[i],
					uint32_t *, iv_offset);
		*per_pkt_hfn = options->pdcp_ses_hfn_en ? 0 : PDCP_DEFAULT_HFN;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_security_attach_session(ops[i], sess);
		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		if (options->op_type == CPERF_PDCP) {
			sym_op->m_src->buf_len = options->segment_sz;
			sym_op->m_src->data_len = options->test_buffer_size;
			sym_op->m_src->pkt_len = sym_op->m_src->data_len;
		}

		if (options->op_type == CPERF_DOCSIS) {
			if (options->imix_distribution_count) {
				buf_sz = options->imix_buffer_sizes[*imix_idx];
				*imix_idx = (*imix_idx + 1) % options->pool_sz;
			} else
				buf_sz = options->test_buffer_size;

			sym_op->m_src->buf_len = options->segment_sz;
			sym_op->m_src->data_len = buf_sz;
			sym_op->m_src->pkt_len = buf_sz;

			/* DOCSIS header is not CRC'ed */
			sym_op->auth.data.offset = options->docsis_hdr_sz;
			sym_op->auth.data.length = buf_sz -
				sym_op->auth.data.offset - RTE_ETHER_CRC_LEN;
			/*
			 * DOCSIS header and SRC and DST MAC addresses are not
			 * ciphered
			 */
			sym_op->cipher.data.offset = sym_op->auth.data.offset +
				RTE_ETHER_HDR_LEN - RTE_ETHER_TYPE_LEN;
			sym_op->cipher.data.length = buf_sz -
				sym_op->cipher.data.offset;
		}

		/* Set dest mbuf to NULL when operating in-place (dst_buf_offset == 0) */
		if (dst_buf_offset == 0)
			sym_op->m_dst = NULL;
		else
			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
							dst_buf_offset);
	}

	RTE_SET_USED(tsc_start);
	RTE_SET_USED(test_vector);
}

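/*
 * Populate IPsec operations: trim the segment chain so it covers
 * test_buffer_size bytes and still leaves room for the digest in the last
 * segment. Unless a test file is used, the IPv4 header of outbound packets
 * is rewritten from the plaintext vector; the cycles spent doing that are
 * added to *tsc_start so the caller can discount them from the measurement.
 */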
static void
cperf_set_ops_security_ipsec(struct rte_crypto_op **ops,
		uint32_t src_buf_offset __rte_unused,
		uint32_t dst_buf_offset __rte_unused,
		uint16_t nb_ops, void *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset __rte_unused, uint32_t *imix_idx,
		uint64_t *tsc_start)
{
	const uint32_t test_buffer_size = options->test_buffer_size;
	uint64_t tsc_start_temp, tsc_end_temp;
	uint16_t i = 0;

	RTE_SET_USED(imix_idx);

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
		struct rte_mbuf *m = sym_op->m_src;
		uint32_t offset = test_buffer_size;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_security_attach_session(ops[i], sess);
		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] + src_buf_offset);
		sym_op->m_src->pkt_len = test_buffer_size;

		while ((m->next != NULL) && (offset >= m->data_len)) {
			offset -= m->data_len;
			m = m->next;
		}
		m->data_len = offset;
		/*
		 * If there is not enough room in segment,
		 * place the digest in the next segment
		 */
		if (rte_pktmbuf_tailroom(m) < options->digest_sz) {
			m = m->next;
			offset = 0;
		}
		m->next = NULL;

		sym_op->m_dst = NULL;
	}

	if (options->test_file != NULL)
		return;

	tsc_start_temp = rte_rdtsc_precise();

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
		struct rte_mbuf *m = sym_op->m_src;

		test_ipsec_vec_populate(m, options, test_vector);
	}

	tsc_end_temp = rte_rdtsc_precise();
	*tsc_start += tsc_end_temp - tsc_start_temp;
}

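/*
 * Populate TLS record operations: each op is marked as application data
 * (content type 0x17), the segment chain is sized to test_buffer_size and
 * the last segment is chosen so it has room for the digest.
 */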
static void
cperf_set_ops_security_tls(struct rte_crypto_op **ops,
		uint32_t src_buf_offset __rte_unused,
		uint32_t dst_buf_offset __rte_unused,
		uint16_t nb_ops, void *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset __rte_unused, uint32_t *imix_idx,
		uint64_t *tsc_start)
{
	const uint32_t test_buffer_size = options->test_buffer_size;
	uint16_t i = 0;

	RTE_SET_USED(imix_idx);
	RTE_SET_USED(tsc_start);
	RTE_SET_USED(test_vector);

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
		struct rte_mbuf *m = sym_op->m_src;
		uint32_t offset = test_buffer_size;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		ops[i]->param1.tls_record.content_type = 0x17;
		rte_security_attach_session(ops[i], sess);
		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] + src_buf_offset);
		sym_op->m_src->pkt_len = test_buffer_size;

		while ((m->next != NULL) && (offset >= m->data_len)) {
			offset -= m->data_len;
			m = m->next;
		}
		m->data_len = offset;
		/*
		 * If there is not enough room in segment,
		 * place the digest in the next segment
		 */
		if ((rte_pktmbuf_tailroom(m)) < options->digest_sz) {
			m = m->next;
			m->data_len = 0;
		}
		m->next = NULL;

		sym_op->m_dst = NULL;
	}
}
#endif

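/*
 * Populate operations for the NULL cipher: only the source/destination mbufs
 * and the cipher data length/offset need to be set.
 */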
static void
cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
		uint32_t src_buf_offset, uint32_t dst_buf_offset,
		uint16_t nb_ops, void *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector __rte_unused,
		uint16_t iv_offset __rte_unused, uint32_t *imix_idx,
		uint64_t *tsc_start __rte_unused)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_crypto_op_attach_sym_session(ops[i], sess);

		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		/* Set dest mbuf to NULL when operating in-place (dst_buf_offset == 0) */
		if (dst_buf_offset == 0)
			sym_op->m_dst = NULL;
		else
			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
							dst_buf_offset);

		/* cipher parameters */
		if (options->imix_distribution_count) {
			sym_op->cipher.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->cipher.data.length = options->test_buffer_size;
		sym_op->cipher.data.offset = 0;
	}
}

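/*
 * Populate operations for NULL authentication: only the source/destination
 * mbufs and the auth data length/offset need to be set.
 */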
static void
cperf_set_ops_null_auth(struct rte_crypto_op **ops,
		uint32_t src_buf_offset, uint32_t dst_buf_offset,
		uint16_t nb_ops, void *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector __rte_unused,
		uint16_t iv_offset __rte_unused, uint32_t *imix_idx,
		uint64_t *tsc_start __rte_unused)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_crypto_op_attach_sym_session(ops[i], sess);

		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		/* Set dest mbuf to NULL when operating in-place (dst_buf_offset == 0) */
		if (dst_buf_offset == 0)
			sym_op->m_dst = NULL;
		else
			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
							dst_buf_offset);

		/* auth parameters */
		if (options->imix_distribution_count) {
			sym_op->auth.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->auth.data.length = options->test_buffer_size;
		sym_op->auth.data.offset = 0;
	}
}

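/*
 * Populate cipher-only operations. SNOW 3G, KASUMI and ZUC express the data
 * length in bits, hence the <<= 3. In verify mode the cipher IV from the
 * test vector is copied into each operation.
 */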
static void
cperf_set_ops_cipher(struct rte_crypto_op **ops,
		uint32_t src_buf_offset, uint32_t dst_buf_offset,
		uint16_t nb_ops, void *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset, uint32_t *imix_idx,
		uint64_t *tsc_start __rte_unused)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_crypto_op_attach_sym_session(ops[i], sess);

		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		/* Set dest mbuf to NULL when operating in-place (dst_buf_offset == 0) */
		if (dst_buf_offset == 0)
			sym_op->m_dst = NULL;
		else
			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
							dst_buf_offset);

		/* cipher parameters */
		if (options->imix_distribution_count) {
			sym_op->cipher.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->cipher.data.length = options->test_buffer_size;

		if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
				options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
				options->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3)
			sym_op->cipher.data.length <<= 3;

		sym_op->cipher.data.offset = 0;
	}

	if (options->test == CPERF_TEST_TYPE_VERIFY) {
		for (i = 0; i < nb_ops; i++) {
			uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
					uint8_t *, iv_offset);

			memcpy(iv_ptr, test_vector->cipher_iv.data,
					test_vector->cipher_iv.length);

		}
	}
}

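/*
 * Populate auth-only operations. For digest generation the digest is placed
 * in the mbuf right after the test buffer (spilling into the next segment
 * when the current one lacks room); for verification the reference digest
 * from the test vector is used instead.
 */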
static void
cperf_set_ops_auth(struct rte_crypto_op **ops,
		uint32_t src_buf_offset, uint32_t dst_buf_offset,
		uint16_t nb_ops, void *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset, uint32_t *imix_idx,
		uint64_t *tsc_start __rte_unused)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_crypto_op_attach_sym_session(ops[i], sess);

		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		/* Set dest mbuf to NULL when operating in-place (dst_buf_offset == 0) */
		if (dst_buf_offset == 0)
			sym_op->m_dst = NULL;
		else
			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
							dst_buf_offset);

		if (test_vector->auth_iv.length) {
			uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
								uint8_t *,
								iv_offset);
			memcpy(iv_ptr, test_vector->auth_iv.data,
					test_vector->auth_iv.length);
		}

		/* authentication parameters */
		if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
			sym_op->auth.digest.data = test_vector->digest.data;
			sym_op->auth.digest.phys_addr =
					test_vector->digest.phys_addr;
		} else {

			uint32_t offset = options->test_buffer_size;
			struct rte_mbuf *buf, *tbuf;

			if (options->out_of_place) {
				buf = sym_op->m_dst;
			} else {
				tbuf = sym_op->m_src;
				while ((tbuf->next != NULL) &&
						(offset >= tbuf->data_len)) {
					offset -= tbuf->data_len;
					tbuf = tbuf->next;
				}
				/*
				 * If there is not enough room in segment,
				 * place the digest in the next segment
				 */
				if ((tbuf->data_len - offset) < options->digest_sz) {
					tbuf = tbuf->next;
					offset = 0;
				}
				buf = tbuf;
			}

			sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
					uint8_t *, offset);
			sym_op->auth.digest.phys_addr =
					rte_pktmbuf_iova_offset(buf, offset);

		}

		if (options->imix_distribution_count) {
			sym_op->auth.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->auth.data.length = options->test_buffer_size;

		if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
				options->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 ||
				options->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3)
			sym_op->auth.data.length <<= 3;

		sym_op->auth.data.offset = 0;
	}

	if (options->test == CPERF_TEST_TYPE_VERIFY) {
		if (test_vector->auth_iv.length) {
			for (i = 0; i < nb_ops; i++) {
				uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
						uint8_t *, iv_offset);

				memcpy(iv_ptr, test_vector->auth_iv.data,
						test_vector->auth_iv.length);
			}
		}
	}
}

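/*
 * Populate chained cipher+auth operations. The cipher length is extended by
 * the digest size for AUTH_THEN_CIPHER with digest generation, and bit-level
 * lengths are used for the wireless algorithms. In verify mode the cipher IV
 * (and the auth IV, when present) is copied into the per-op IV area.
 */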
static void
cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
		uint32_t src_buf_offset, uint32_t dst_buf_offset,
		uint16_t nb_ops, void *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset, uint32_t *imix_idx,
		uint64_t *tsc_start __rte_unused)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_crypto_op_attach_sym_session(ops[i], sess);

		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		/* Set dest mbuf to NULL when operating in-place (dst_buf_offset == 0) */
		if (dst_buf_offset == 0)
			sym_op->m_dst = NULL;
		else
			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
							dst_buf_offset);

		/* cipher parameters */
		if (options->imix_distribution_count) {
			sym_op->cipher.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->cipher.data.length = options->test_buffer_size;

		if ((options->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE) &&
				(options->op_type == CPERF_AUTH_THEN_CIPHER))
			sym_op->cipher.data.length += options->digest_sz;

		if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
				options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
				options->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3)
			sym_op->cipher.data.length <<= 3;

		sym_op->cipher.data.offset = 0;

		/* authentication parameters */
		if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
			sym_op->auth.digest.data = test_vector->digest.data;
			sym_op->auth.digest.phys_addr =
					test_vector->digest.phys_addr;
		} else {

			uint32_t offset = options->test_buffer_size;
			struct rte_mbuf *buf, *tbuf;

			if (options->out_of_place) {
				buf = sym_op->m_dst;
			} else {
				tbuf = sym_op->m_src;
				while ((tbuf->next != NULL) &&
						(offset >= tbuf->data_len)) {
					offset -= tbuf->data_len;
					tbuf = tbuf->next;
				}
				/*
				 * If there is not enough room in segment,
				 * place the digest in the next segment
				 */
				if ((tbuf->data_len - offset) < options->digest_sz) {
					tbuf = tbuf->next;
					offset = 0;
				}
				buf = tbuf;
			}

			sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
					uint8_t *, offset);
			sym_op->auth.digest.phys_addr =
					rte_pktmbuf_iova_offset(buf, offset);
		}

		if (options->imix_distribution_count) {
			sym_op->auth.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->auth.data.length = options->test_buffer_size;

		if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
				options->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 ||
				options->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3)
			sym_op->auth.data.length <<= 3;

		sym_op->auth.data.offset = 0;
	}

	if (options->test == CPERF_TEST_TYPE_VERIFY) {
		for (i = 0; i < nb_ops; i++) {
			uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
					uint8_t *, iv_offset);

			memcpy(iv_ptr, test_vector->cipher_iv.data,
					test_vector->cipher_iv.length);
			if (test_vector->auth_iv.length) {
				/*
				 * Copy IV after the crypto operation and
				 * the cipher IV
				 */
				iv_ptr += test_vector->cipher_iv.length;
				memcpy(iv_ptr, test_vector->auth_iv.data,
						test_vector->auth_iv.length);
			}
		}

	}
}

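/*
 * Populate AEAD operations. The AAD is placed right after the IV (IV length
 * rounded up to 16 bytes) in the op private data. When the vectors are
 * copied for AES-CCM, the nonce goes one byte into the IV field and the AAD
 * 18 bytes into the AAD field, as the in-line comment below explains.
 */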
static void
cperf_set_ops_aead(struct rte_crypto_op **ops,
		uint32_t src_buf_offset, uint32_t dst_buf_offset,
		uint16_t nb_ops, void *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset, uint32_t *imix_idx,
		uint64_t *tsc_start __rte_unused)
{
	uint16_t i;
	/* AAD is placed after the IV */
	uint16_t aad_offset = iv_offset +
			RTE_ALIGN_CEIL(test_vector->aead_iv.length, 16);

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_crypto_op_attach_sym_session(ops[i], sess);

		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		/* Set dest mbuf to NULL when operating in-place (dst_buf_offset == 0) */
		if (dst_buf_offset == 0)
			sym_op->m_dst = NULL;
		else
			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
							dst_buf_offset);

		/* AEAD parameters */
		if (options->imix_distribution_count) {
			sym_op->aead.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->aead.data.length = options->test_buffer_size;
		sym_op->aead.data.offset = 0;

		sym_op->aead.aad.data = rte_crypto_op_ctod_offset(ops[i],
					uint8_t *, aad_offset);
		sym_op->aead.aad.phys_addr = rte_crypto_op_ctophys_offset(ops[i],
					aad_offset);

		if (options->aead_op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
			sym_op->aead.digest.data = test_vector->digest.data;
			sym_op->aead.digest.phys_addr =
					test_vector->digest.phys_addr;
		} else {

			uint32_t offset = sym_op->aead.data.length +
						sym_op->aead.data.offset;
			struct rte_mbuf *buf, *tbuf;

			if (options->out_of_place) {
				buf = sym_op->m_dst;
			} else {
				tbuf = sym_op->m_src;
				while ((tbuf->next != NULL) &&
						(offset >= tbuf->data_len)) {
					offset -= tbuf->data_len;
					tbuf = tbuf->next;
				}
				/*
				 * If there is not enough room in segment,
				 * place the digest in the next segment
				 */
				if ((tbuf->data_len - offset) < options->digest_sz) {
					tbuf = tbuf->next;
					offset = 0;
				}
				buf = tbuf;
			}

			sym_op->aead.digest.data = rte_pktmbuf_mtod_offset(buf,
					uint8_t *, offset);
			sym_op->aead.digest.phys_addr =
					rte_pktmbuf_iova_offset(buf, offset);
		}
	}

	if ((options->test == CPERF_TEST_TYPE_VERIFY) ||
	    (options->test == CPERF_TEST_TYPE_LATENCY) ||
	    (options->test == CPERF_TEST_TYPE_THROUGHPUT &&
	     (options->aead_op == RTE_CRYPTO_AEAD_OP_DECRYPT ||
	      options->cipher_op == RTE_CRYPTO_CIPHER_OP_DECRYPT))) {
		for (i = 0; i < nb_ops; i++) {
			uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
					uint8_t *, iv_offset);

			/*
			 * If doing AES-CCM, nonce is copied one byte
			 * after the start of IV field, and AAD is copied
			 * 18 bytes after the start of the AAD field.
			 */
			if (options->aead_algo == RTE_CRYPTO_AEAD_AES_CCM) {
				memcpy(iv_ptr + 1, test_vector->aead_iv.data,
					test_vector->aead_iv.length);

				memcpy(ops[i]->sym->aead.aad.data + 18,
					test_vector->aad.data,
					test_vector->aad.length);
			} else {
				memcpy(iv_ptr, test_vector->aead_iv.data,
					test_vector->aead_iv.length);

				memcpy(ops[i]->sym->aead.aad.data,
					test_vector->aad.data,
					test_vector->aad.length);
			}
		}
	}
}

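/*
 * Build a lookaside-protocol IPsec session (ESP, tunnel mode): either a
 * single AEAD transform or a cipher+auth chain ordered by traffic direction.
 * The SPI is derived from the lcore id so every worker gets its own SA.
 */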
static void *
create_ipsec_session(struct rte_mempool *sess_mp,
		uint8_t dev_id,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset)
{
	struct rte_crypto_sym_xform auth_xform = {0};
	struct rte_crypto_sym_xform *crypto_xform;
	struct rte_crypto_sym_xform xform = {0};

	if (options->aead_algo != 0) {
		/* Setup AEAD Parameters */
		xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
		xform.next = NULL;
		xform.aead.algo = options->aead_algo;
		xform.aead.op = options->aead_op;
		xform.aead.iv.offset = iv_offset;
		xform.aead.key.data = test_vector->aead_key.data;
		xform.aead.key.length = test_vector->aead_key.length;
		xform.aead.iv.length = test_vector->aead_iv.length;
		xform.aead.digest_length = options->digest_sz;
		xform.aead.aad_length = options->aead_aad_sz;
		crypto_xform = &xform;
	} else if (options->cipher_algo != 0 && options->auth_algo != 0) {
		/* Setup Cipher Parameters */
		xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		xform.cipher.algo = options->cipher_algo;
		xform.cipher.op = options->cipher_op;
		xform.cipher.iv.offset = iv_offset;
		xform.cipher.iv.length = test_vector->cipher_iv.length;
		/* cipher different than null */
		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			xform.cipher.key.data = test_vector->cipher_key.data;
			xform.cipher.key.length =
				test_vector->cipher_key.length;
		} else {
			xform.cipher.key.data = NULL;
			xform.cipher.key.length = 0;
		}

		/* Setup Auth Parameters */
		auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
		auth_xform.auth.algo = options->auth_algo;
		auth_xform.auth.op = options->auth_op;
		auth_xform.auth.iv.offset = iv_offset +
				xform.cipher.iv.length;
		/* auth different than null */
		if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			auth_xform.auth.digest_length = options->digest_sz;
			auth_xform.auth.key.length =
						test_vector->auth_key.length;
			auth_xform.auth.key.data = test_vector->auth_key.data;
			auth_xform.auth.iv.length = test_vector->auth_iv.length;
		} else {
			auth_xform.auth.digest_length = 0;
			auth_xform.auth.key.length = 0;
			auth_xform.auth.key.data = NULL;
			auth_xform.auth.iv.length = 0;
		}

		if (options->is_outbound) {
			crypto_xform = &xform;
			xform.next = &auth_xform;
			auth_xform.next = NULL;
		} else {
			crypto_xform = &auth_xform;
			auth_xform.next = &xform;
			xform.next = NULL;
		}
	} else {
		return NULL;
	}

#define CPERF_IPSEC_SRC_IP	0x01010101
#define CPERF_IPSEC_DST_IP	0x02020202
#define CPERF_IPSEC_SALT	0x0
#define CPERF_IPSEC_DEFTTL	64
	struct rte_security_ipsec_tunnel_param tunnel = {
		.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4,
		{.ipv4 = {
			.src_ip = { .s_addr = CPERF_IPSEC_SRC_IP},
			.dst_ip = { .s_addr = CPERF_IPSEC_DST_IP},
			.dscp = 0,
			.df = 0,
			.ttl = CPERF_IPSEC_DEFTTL,
		} },
	};
	struct rte_security_session_conf sess_conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		{.ipsec = {
			.spi = rte_lcore_id() + 1,
			/**< For the sake of testing, lcore_id is taken as the
			 * SPI so that a different session is created for
			 * every core.
			 */
			.salt = CPERF_IPSEC_SALT,
			.options = { 0 },
			.replay_win_sz = 0,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.tunnel = tunnel,
		} },
		.userdata = NULL,
		.crypto_xform = crypto_xform,
	};

	if (options->is_outbound)
		sess_conf.ipsec.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
	else
		sess_conf.ipsec.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;

	void *ctx = rte_cryptodev_get_sec_ctx(dev_id);

	/* Create security session */
	return (void *)rte_security_session_create(ctx, &sess_conf, sess_mp);
}

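/*
 * Build a TLS record security session (TLS 1.2 by default, overridable via
 * the tls_version option). For non-AEAD ciphersuites only the auth-then-
 * encrypt ordering is supported on the write side.
 */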
static void *
create_tls_session(struct rte_mempool *sess_mp,
		uint8_t dev_id,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset)
{
	struct rte_crypto_sym_xform auth_xform = {0};
	struct rte_crypto_sym_xform *crypto_xform;
	struct rte_crypto_sym_xform xform = {0};

	if (options->aead_algo != 0) {
		/* Setup AEAD Parameters */
		xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
		xform.next = NULL;
		xform.aead.algo = options->aead_algo;
		xform.aead.op = options->aead_op;
		xform.aead.iv.offset = iv_offset;
		xform.aead.key.data = test_vector->aead_key.data;
		xform.aead.key.length = test_vector->aead_key.length;
		xform.aead.iv.length = test_vector->aead_iv.length;
		xform.aead.digest_length = options->digest_sz;
		xform.aead.aad_length = options->aead_aad_sz;
		crypto_xform = &xform;
	} else if (options->cipher_algo != 0 && options->auth_algo != 0) {
		/* Setup Cipher Parameters */
		xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		xform.cipher.algo = options->cipher_algo;
		xform.cipher.op = options->cipher_op;
		xform.cipher.iv.offset = iv_offset;
		xform.cipher.iv.length = test_vector->cipher_iv.length;
		/* cipher different than null */
		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			xform.cipher.key.data = test_vector->cipher_key.data;
			xform.cipher.key.length = test_vector->cipher_key.length;
		} else {
			xform.cipher.key.data = NULL;
			xform.cipher.key.length = 0;
		}

		/* Setup Auth Parameters */
		auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
		auth_xform.auth.algo = options->auth_algo;
		auth_xform.auth.op = options->auth_op;
		auth_xform.auth.iv.offset = iv_offset + xform.cipher.iv.length;
		/* auth different than null */
		if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			auth_xform.auth.digest_length = options->digest_sz;
			auth_xform.auth.key.length = test_vector->auth_key.length;
			auth_xform.auth.key.data = test_vector->auth_key.data;
			auth_xform.auth.iv.length = test_vector->auth_iv.length;
		} else {
			auth_xform.auth.digest_length = 0;
			auth_xform.auth.key.length = 0;
			auth_xform.auth.key.data = NULL;
			auth_xform.auth.iv.length = 0;
		}

		if (options->is_outbound) {
			/* Currently supporting AUTH then Encrypt mode only for TLS. */
			crypto_xform = &auth_xform;
			auth_xform.next = &xform;
			xform.next = NULL;
		} else {
			crypto_xform = &xform;
			xform.next = &auth_xform;
			auth_xform.next = NULL;
		}
	} else {
		return NULL;
	}

	struct rte_security_tls_record_sess_options opts = {
		.iv_gen_disable = 0,
		.extra_padding_enable = 0,
	};
	struct rte_security_session_conf sess_conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_TLS_RECORD,
		{.tls_record = {
			.ver = RTE_SECURITY_VERSION_TLS_1_2,
			.options = opts,
		} },
		.userdata = NULL,
		.crypto_xform = crypto_xform,
	};
	if (options->tls_version)
		sess_conf.tls_record.ver = options->tls_version;

	if (options->is_outbound)
		sess_conf.tls_record.type = RTE_SECURITY_TLS_SESS_TYPE_WRITE;
	else
		sess_conf.tls_record.type = RTE_SECURITY_TLS_SESS_TYPE_READ;

	void *ctx = rte_cryptodev_get_sec_ctx(dev_id);

	/* Create security session */
	return (void *)rte_security_session_create(ctx, &sess_conf, sess_mp);
}

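/*
 * Create the session matching the configured op type: an asymmetric session
 * for modex/ECDSA/SM2, a security session for PDCP, DOCSIS, IPsec and TLS,
 * or a plain symmetric session (cipher, auth, chained or AEAD) otherwise.
 */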
static void *
cperf_create_session(struct rte_mempool *sess_mp,
	uint8_t dev_id,
	const struct cperf_options *options,
	const struct cperf_test_vector *test_vector,
	uint16_t iv_offset)
{
	struct rte_crypto_sym_xform cipher_xform;
	struct rte_crypto_sym_xform auth_xform;
	struct rte_crypto_sym_xform aead_xform;
	void *sess = NULL;
	void *asym_sess = NULL;
	struct rte_crypto_asym_xform xform = {0};
	int ret;

	if (options->op_type == CPERF_ASYM_MODEX) {
		xform.next = NULL;
		xform.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX;
		xform.modex.modulus.data = options->modex_data->modulus.data;
		xform.modex.modulus.length = options->modex_data->modulus.len;
		xform.modex.exponent.data = options->modex_data->exponent.data;
		xform.modex.exponent.length = options->modex_data->exponent.len;

		ret = rte_cryptodev_asym_session_create(dev_id, &xform,
				sess_mp, &asym_sess);
		if (ret < 0) {
			RTE_LOG(ERR, USER1, "Asym session create failed\n");
			return NULL;
		}
		return asym_sess;
	}

	if (options->op_type == CPERF_ASYM_SECP256R1) {
		xform.next = NULL;
		xform.xform_type = RTE_CRYPTO_ASYM_XFORM_ECDSA;
		xform.ec.curve_id = options->secp256r1_data->curve;
		xform.ec.pkey.data = options->secp256r1_data->pkey.data;
		xform.ec.pkey.length = options->secp256r1_data->pkey.length;
		xform.ec.q.x.data = options->secp256r1_data->pubkey_qx.data;
		xform.ec.q.x.length = options->secp256r1_data->pubkey_qx.length;
		xform.ec.q.y.data = options->secp256r1_data->pubkey_qy.data;
		xform.ec.q.y.length = options->secp256r1_data->pubkey_qy.length;

		ret = rte_cryptodev_asym_session_create(dev_id, &xform,
				sess_mp, &asym_sess);
		if (ret < 0) {
			RTE_LOG(ERR, USER1, "ECDSA Asym session create failed\n");
			return NULL;
		}

		return asym_sess;
	}

	if (options->op_type == CPERF_ASYM_SM2) {
		xform.next = NULL;
		xform.xform_type = RTE_CRYPTO_ASYM_XFORM_SM2;
		xform.ec.curve_id = options->sm2_data->curve;
		xform.ec.pkey.data = options->sm2_data->pkey.data;
		xform.ec.pkey.length = options->sm2_data->pkey.length;
		xform.ec.q.x.data = options->sm2_data->pubkey_qx.data;
		xform.ec.q.x.length = options->sm2_data->pubkey_qx.length;
		xform.ec.q.y.data = options->sm2_data->pubkey_qy.data;
		xform.ec.q.y.length = options->sm2_data->pubkey_qy.length;

		ret = rte_cryptodev_asym_session_create(dev_id, &xform,
				sess_mp, &asym_sess);
		if (ret < 0) {
			RTE_LOG(ERR, USER1, "SM2 Asym session create failed\n");
			return NULL;
		}

		return asym_sess;
	}
#ifdef RTE_LIB_SECURITY
	/*
	 * security only
	 */
	if (options->op_type == CPERF_PDCP) {
		/* Setup Cipher Parameters */
		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		cipher_xform.next = NULL;
		cipher_xform.cipher.algo = options->cipher_algo;
		cipher_xform.cipher.op = options->cipher_op;
		cipher_xform.cipher.iv.offset = iv_offset;
		cipher_xform.cipher.iv.length = 4;

		/* cipher different than null */
		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			cipher_xform.cipher.key.data = test_vector->cipher_key.data;
			cipher_xform.cipher.key.length = test_vector->cipher_key.length;
		} else {
			cipher_xform.cipher.key.data = NULL;
			cipher_xform.cipher.key.length = 0;
		}

		/* Setup Auth Parameters */
		if (options->auth_algo != 0) {
			auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
			auth_xform.next = NULL;
			auth_xform.auth.algo = options->auth_algo;
			auth_xform.auth.op = options->auth_op;
			auth_xform.auth.iv.offset = iv_offset +
				cipher_xform.cipher.iv.length;

			/* auth different than null */
			if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
				auth_xform.auth.digest_length = options->digest_sz;
				auth_xform.auth.key.length = test_vector->auth_key.length;
				auth_xform.auth.key.data = test_vector->auth_key.data;
				auth_xform.auth.iv.length = test_vector->auth_iv.length;
			} else {
				auth_xform.auth.digest_length = 0;
				auth_xform.auth.key.length = 0;
				auth_xform.auth.key.data = NULL;
				auth_xform.auth.iv.length = 0;
			}

			cipher_xform.next = &auth_xform;
		} else {
			cipher_xform.next = NULL;
		}

		struct rte_security_session_conf sess_conf = {
			.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
			.protocol = RTE_SECURITY_PROTOCOL_PDCP,
			{.pdcp = {
				.bearer = 0x16,
				.domain = options->pdcp_domain,
				.pkt_dir = 0,
				.sn_size = options->pdcp_sn_sz,
				.hfn = options->pdcp_ses_hfn_en ?
					PDCP_DEFAULT_HFN : 0,
				.hfn_threshold = 0x70C0A,
				.sdap_enabled = options->pdcp_sdap,
				.hfn_ovrd = !(options->pdcp_ses_hfn_en),
			} },
			.crypto_xform = &cipher_xform
		};

		void *ctx = rte_cryptodev_get_sec_ctx(dev_id);

		/* Create security session */
		return (void *)rte_security_session_create(ctx, &sess_conf, sess_mp);
	}

	if (options->op_type == CPERF_IPSEC) {
		return create_ipsec_session(sess_mp, dev_id,
				options, test_vector, iv_offset);
	}

	if (options->op_type == CPERF_TLS) {
		return create_tls_session(sess_mp, dev_id,
				options, test_vector, iv_offset);
	}

	if (options->op_type == CPERF_DOCSIS) {
		enum rte_security_docsis_direction direction;

		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		cipher_xform.next = NULL;
		cipher_xform.cipher.algo = options->cipher_algo;
		cipher_xform.cipher.op = options->cipher_op;
		cipher_xform.cipher.iv.offset = iv_offset;
		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			cipher_xform.cipher.key.data =
				test_vector->cipher_key.data;
			cipher_xform.cipher.key.length =
				test_vector->cipher_key.length;
			cipher_xform.cipher.iv.length =
				test_vector->cipher_iv.length;
		} else {
			cipher_xform.cipher.key.data = NULL;
			cipher_xform.cipher.key.length = 0;
			cipher_xform.cipher.iv.length = 0;
		}
		cipher_xform.next = NULL;

		if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
			direction = RTE_SECURITY_DOCSIS_DOWNLINK;
		else
			direction = RTE_SECURITY_DOCSIS_UPLINK;

		struct rte_security_session_conf sess_conf = {
			.action_type =
				RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
			.protocol = RTE_SECURITY_PROTOCOL_DOCSIS,
			{.docsis = {
				.direction = direction,
			} },
			.crypto_xform = &cipher_xform
		};
		void *ctx = rte_cryptodev_get_sec_ctx(dev_id);

		/* Create security session */
		return (void *)rte_security_session_create(ctx, &sess_conf, sess_mp);
	}
#endif
	/*
	 * cipher only
	 */
	if (options->op_type == CPERF_CIPHER_ONLY) {
		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		cipher_xform.next = NULL;
		cipher_xform.cipher.algo = options->cipher_algo;
		cipher_xform.cipher.op = options->cipher_op;
		cipher_xform.cipher.iv.offset = iv_offset;

		/* cipher different than null */
		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			cipher_xform.cipher.key.data =
					test_vector->cipher_key.data;
			cipher_xform.cipher.key.length =
					test_vector->cipher_key.length;
			cipher_xform.cipher.iv.length =
					test_vector->cipher_iv.length;
		} else {
			cipher_xform.cipher.key.data = NULL;
			cipher_xform.cipher.key.length = 0;
			cipher_xform.cipher.iv.length = 0;
		}
		/* create crypto session */
		sess = rte_cryptodev_sym_session_create(dev_id, &cipher_xform,
				sess_mp);
	/*
	 *  auth only
	 */
	} else if (options->op_type == CPERF_AUTH_ONLY) {
		auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
		auth_xform.next = NULL;
		auth_xform.auth.algo = options->auth_algo;
		auth_xform.auth.op = options->auth_op;
		auth_xform.auth.iv.offset = iv_offset;

		/* auth different than null */
		if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			auth_xform.auth.digest_length =
					options->digest_sz;
			auth_xform.auth.key.length =
					test_vector->auth_key.length;
			auth_xform.auth.key.data = test_vector->auth_key.data;
			auth_xform.auth.iv.length =
					test_vector->auth_iv.length;
		} else {
			auth_xform.auth.digest_length = 0;
			auth_xform.auth.key.length = 0;
			auth_xform.auth.key.data = NULL;
			auth_xform.auth.iv.length = 0;
		}
		/* create crypto session */
		sess = rte_cryptodev_sym_session_create(dev_id, &auth_xform,
				sess_mp);
	/*
	 * cipher and auth
	 */
	} else if (options->op_type == CPERF_CIPHER_THEN_AUTH
			|| options->op_type == CPERF_AUTH_THEN_CIPHER) {
		/*
		 * cipher
		 */
		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		cipher_xform.next = NULL;
		cipher_xform.cipher.algo = options->cipher_algo;
		cipher_xform.cipher.op = options->cipher_op;
		cipher_xform.cipher.iv.offset = iv_offset;

		/* cipher different than null */
		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			cipher_xform.cipher.key.data =
					test_vector->cipher_key.data;
			cipher_xform.cipher.key.length =
					test_vector->cipher_key.length;
			cipher_xform.cipher.iv.length =
					test_vector->cipher_iv.length;
		} else {
			cipher_xform.cipher.key.data = NULL;
			cipher_xform.cipher.key.length = 0;
			cipher_xform.cipher.iv.length = 0;
		}

		/*
		 * auth
		 */
		auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
		auth_xform.next = NULL;
		auth_xform.auth.algo = options->auth_algo;
		auth_xform.auth.op = options->auth_op;
		auth_xform.auth.iv.offset = iv_offset +
			cipher_xform.cipher.iv.length;

		/* auth different than null */
		if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			auth_xform.auth.digest_length = options->digest_sz;
			auth_xform.auth.iv.length = test_vector->auth_iv.length;
			auth_xform.auth.key.length =
					test_vector->auth_key.length;
			auth_xform.auth.key.data =
					test_vector->auth_key.data;
		} else {
			auth_xform.auth.digest_length = 0;
			auth_xform.auth.key.length = 0;
			auth_xform.auth.key.data = NULL;
			auth_xform.auth.iv.length = 0;
		}

		/* cipher then auth */
		if (options->op_type == CPERF_CIPHER_THEN_AUTH) {
			cipher_xform.next = &auth_xform;
			/* create crypto session */
			sess = rte_cryptodev_sym_session_create(dev_id,
					&cipher_xform, sess_mp);
		} else { /* auth then cipher */
			auth_xform.next = &cipher_xform;
			/* create crypto session */
			sess = rte_cryptodev_sym_session_create(dev_id,
					&auth_xform, sess_mp);
		}
	} else { /* options->op_type == CPERF_AEAD */
		aead_xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
		aead_xform.next = NULL;
		aead_xform.aead.algo = options->aead_algo;
		aead_xform.aead.op = options->aead_op;
		aead_xform.aead.iv.offset = iv_offset;

		aead_xform.aead.key.data =
					test_vector->aead_key.data;
		aead_xform.aead.key.length =
					test_vector->aead_key.length;
		aead_xform.aead.iv.length = test_vector->aead_iv.length;

		aead_xform.aead.digest_length = options->digest_sz;
		aead_xform.aead.aad_length =
					options->aead_aad_sz;

		/* Create crypto session */
		sess = rte_cryptodev_sym_session_create(dev_id, &aead_xform,
				sess_mp);
	}

	return sess;
}

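/*
 * Resolve the session-create and populate_ops callbacks for the configured
 * op type; returns -1 for unsupported types.
 */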
int
cperf_get_op_functions(const struct cperf_options *options,
		struct cperf_op_fns *op_fns)
{
	memset(op_fns, 0, sizeof(struct cperf_op_fns));

	op_fns->sess_create = cperf_create_session;

	switch (options->op_type) {
	case CPERF_AEAD:
		op_fns->populate_ops = cperf_set_ops_aead;
		break;

	case CPERF_AUTH_THEN_CIPHER:
	case CPERF_CIPHER_THEN_AUTH:
		op_fns->populate_ops = cperf_set_ops_cipher_auth;
		break;
	case CPERF_AUTH_ONLY:
		if (options->auth_algo == RTE_CRYPTO_AUTH_NULL)
			op_fns->populate_ops = cperf_set_ops_null_auth;
		else
			op_fns->populate_ops = cperf_set_ops_auth;
		break;
	case CPERF_CIPHER_ONLY:
		if (options->cipher_algo == RTE_CRYPTO_CIPHER_NULL)
			op_fns->populate_ops = cperf_set_ops_null_cipher;
		else
			op_fns->populate_ops = cperf_set_ops_cipher;
		break;
	case CPERF_ASYM_MODEX:
		op_fns->populate_ops = cperf_set_ops_asym_modex;
		break;
	case CPERF_ASYM_SECP256R1:
		op_fns->populate_ops = cperf_set_ops_asym_ecdsa;
		break;
	case CPERF_ASYM_SM2:
		op_fns->populate_ops = cperf_set_ops_asym_sm2;
		break;
#ifdef RTE_LIB_SECURITY
	case CPERF_PDCP:
	case CPERF_DOCSIS:
		op_fns->populate_ops = cperf_set_ops_security;
		break;
	case CPERF_IPSEC:
		op_fns->populate_ops = cperf_set_ops_security_ipsec;
		break;
	case CPERF_TLS:
		op_fns->populate_ops = cperf_set_ops_security_tls;
		break;
#endif
	default:
		return -1;
	}

	return 0;
}