xref: /dpdk/app/test-crypto-perf/cperf_ops.c (revision 30a1de105a5f40d77b344a891c4a68f79e815c43)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2017 Intel Corporation
3  */
4 
5 #include <rte_cryptodev.h>
6 #include <rte_ether.h>
7 #include <rte_ip.h>
8 
9 #include "cperf_ops.h"
10 #include "cperf_test_vectors.h"
11 
12 static int
13 cperf_set_ops_asym(struct rte_crypto_op **ops,
14 		   uint32_t src_buf_offset __rte_unused,
15 		   uint32_t dst_buf_offset __rte_unused, uint16_t nb_ops,
16 		   struct rte_cryptodev_sym_session *sess,
17 		   const struct cperf_options *options __rte_unused,
18 		   const struct cperf_test_vector *test_vector __rte_unused,
19 		   uint16_t iv_offset __rte_unused,
20 		   uint32_t *imix_idx __rte_unused,
21 		   uint64_t *tsc_start __rte_unused)
22 {
23 	uint16_t i;
24 	void *asym_sess = (void *)sess;
25 
26 	for (i = 0; i < nb_ops; i++) {
27 		struct rte_crypto_asym_op *asym_op = ops[i]->asym;
28 
29 		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
30 		asym_op->modex.base.data = perf_base;
31 		asym_op->modex.base.length = sizeof(perf_base);
32 		asym_op->modex.result.data = perf_mod_result;
33 		asym_op->modex.result.length = sizeof(perf_mod_result);
34 		rte_crypto_op_attach_asym_session(ops[i], asym_sess);
35 	}
36 	return 0;
37 }
38 
39 #ifdef RTE_LIB_SECURITY
40 static void
41 test_ipsec_vec_populate(struct rte_mbuf *m, const struct cperf_options *options,
42 			const struct cperf_test_vector *test_vector)
43 {
44 	struct rte_ipv4_hdr *ip = rte_pktmbuf_mtod(m, struct rte_ipv4_hdr *);
45 
46 	if ((options->aead_op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ||
47 		(options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)) {
48 		memcpy(ip, test_vector->plaintext.data, m->data_len);
49 
50 		ip->total_length = rte_cpu_to_be_16(m->data_len);
51 	}
52 }
53 
54 static int
55 cperf_set_ops_security(struct rte_crypto_op **ops,
56 		uint32_t src_buf_offset __rte_unused,
57 		uint32_t dst_buf_offset __rte_unused,
58 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
59 		const struct cperf_options *options,
60 		const struct cperf_test_vector *test_vector,
61 		uint16_t iv_offset __rte_unused, uint32_t *imix_idx,
62 		uint64_t *tsc_start)
63 {
64 	uint64_t tsc_start_temp, tsc_end_temp;
65 	uint16_t i;
66 
67 	for (i = 0; i < nb_ops; i++) {
68 		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
69 		struct rte_security_session *sec_sess =
70 			(struct rte_security_session *)sess;
71 		uint32_t buf_sz;
72 
73 		uint32_t *per_pkt_hfn = rte_crypto_op_ctod_offset(ops[i],
74 					uint32_t *, iv_offset);
75 		*per_pkt_hfn = options->pdcp_ses_hfn_en ? 0 : PDCP_DEFAULT_HFN;
76 
77 		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
78 		rte_security_attach_session(ops[i], sec_sess);
79 		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
80 							src_buf_offset);
81 
82 		if (options->op_type == CPERF_PDCP ||
83 				options->op_type == CPERF_IPSEC) {
84 			/* In case of IPsec, headroom is consumed by PMD,
85 			 * hence resetting it.
86 			 */
87 			sym_op->m_src->data_off = options->headroom_sz;
88 
89 			sym_op->m_src->buf_len = options->segment_sz;
90 			sym_op->m_src->data_len = options->test_buffer_size;
91 			sym_op->m_src->pkt_len = sym_op->m_src->data_len;
92 
93 			if ((options->op_type == CPERF_IPSEC) &&
94 			    (options->test_file == NULL) &&
95 			    (options->test == CPERF_TEST_TYPE_THROUGHPUT)) {
96 				tsc_start_temp = rte_rdtsc_precise();
97 				test_ipsec_vec_populate(sym_op->m_src, options,
98 							test_vector);
99 				tsc_end_temp = rte_rdtsc_precise();
100 
101 				*tsc_start += (tsc_end_temp - tsc_start_temp);
102 			}
103 		}
104 
105 		if (options->op_type == CPERF_DOCSIS) {
106 			if (options->imix_distribution_count) {
107 				buf_sz = options->imix_buffer_sizes[*imix_idx];
108 				*imix_idx = (*imix_idx + 1) % options->pool_sz;
109 			} else
110 				buf_sz = options->test_buffer_size;
111 
112 			sym_op->m_src->buf_len = options->segment_sz;
113 			sym_op->m_src->data_len = buf_sz;
114 			sym_op->m_src->pkt_len = buf_sz;
115 
116 			/* DOCSIS header is not CRC'ed */
117 			sym_op->auth.data.offset = options->docsis_hdr_sz;
118 			sym_op->auth.data.length = buf_sz -
119 				sym_op->auth.data.offset - RTE_ETHER_CRC_LEN;
120 			/*
121 			 * DOCSIS header and SRC and DST MAC addresses are not
122 			 * ciphered
123 			 */
124 			sym_op->cipher.data.offset = sym_op->auth.data.offset +
125 				RTE_ETHER_HDR_LEN - RTE_ETHER_TYPE_LEN;
126 			sym_op->cipher.data.length = buf_sz -
127 				sym_op->cipher.data.offset;
128 		}
129 
130 		/* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
131 		if (dst_buf_offset == 0)
132 			sym_op->m_dst = NULL;
133 		else
134 			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
135 							dst_buf_offset);
136 	}
137 
138 	return 0;
139 }
140 #endif
141 
142 static int
143 cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
144 		uint32_t src_buf_offset, uint32_t dst_buf_offset,
145 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
146 		const struct cperf_options *options,
147 		const struct cperf_test_vector *test_vector __rte_unused,
148 		uint16_t iv_offset __rte_unused, uint32_t *imix_idx,
149 		uint64_t *tsc_start __rte_unused)
150 {
151 	uint16_t i;
152 
153 	for (i = 0; i < nb_ops; i++) {
154 		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
155 
156 		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
157 		rte_crypto_op_attach_sym_session(ops[i], sess);
158 
159 		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
160 							src_buf_offset);
161 
162 		/* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
163 		if (dst_buf_offset == 0)
164 			sym_op->m_dst = NULL;
165 		else
166 			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
167 							dst_buf_offset);
168 
169 		/* cipher parameters */
170 		if (options->imix_distribution_count) {
171 			sym_op->cipher.data.length =
172 				options->imix_buffer_sizes[*imix_idx];
173 			*imix_idx = (*imix_idx + 1) % options->pool_sz;
174 		} else
175 			sym_op->cipher.data.length = options->test_buffer_size;
176 		sym_op->cipher.data.offset = 0;
177 	}
178 
179 	return 0;
180 }
181 
182 static int
183 cperf_set_ops_null_auth(struct rte_crypto_op **ops,
184 		uint32_t src_buf_offset, uint32_t dst_buf_offset,
185 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
186 		const struct cperf_options *options,
187 		const struct cperf_test_vector *test_vector __rte_unused,
188 		uint16_t iv_offset __rte_unused, uint32_t *imix_idx,
189 		uint64_t *tsc_start __rte_unused)
190 {
191 	uint16_t i;
192 
193 	for (i = 0; i < nb_ops; i++) {
194 		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
195 
196 		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
197 		rte_crypto_op_attach_sym_session(ops[i], sess);
198 
199 		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
200 							src_buf_offset);
201 
202 		/* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
203 		if (dst_buf_offset == 0)
204 			sym_op->m_dst = NULL;
205 		else
206 			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
207 							dst_buf_offset);
208 
209 		/* auth parameters */
210 		if (options->imix_distribution_count) {
211 			sym_op->auth.data.length =
212 				options->imix_buffer_sizes[*imix_idx];
213 			*imix_idx = (*imix_idx + 1) % options->pool_sz;
214 		} else
215 			sym_op->auth.data.length = options->test_buffer_size;
216 		sym_op->auth.data.offset = 0;
217 	}
218 
219 	return 0;
220 }
221 
222 static int
223 cperf_set_ops_cipher(struct rte_crypto_op **ops,
224 		uint32_t src_buf_offset, uint32_t dst_buf_offset,
225 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
226 		const struct cperf_options *options,
227 		const struct cperf_test_vector *test_vector,
228 		uint16_t iv_offset, uint32_t *imix_idx,
229 		uint64_t *tsc_start __rte_unused)
230 {
231 	uint16_t i;
232 
233 	for (i = 0; i < nb_ops; i++) {
234 		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
235 
236 		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
237 		rte_crypto_op_attach_sym_session(ops[i], sess);
238 
239 		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
240 							src_buf_offset);
241 
242 		/* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
243 		if (dst_buf_offset == 0)
244 			sym_op->m_dst = NULL;
245 		else
246 			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
247 							dst_buf_offset);
248 
249 		/* cipher parameters */
250 		if (options->imix_distribution_count) {
251 			sym_op->cipher.data.length =
252 				options->imix_buffer_sizes[*imix_idx];
253 			*imix_idx = (*imix_idx + 1) % options->pool_sz;
254 		} else
255 			sym_op->cipher.data.length = options->test_buffer_size;
256 
257 		if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
258 				options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
259 				options->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3)
260 			sym_op->cipher.data.length <<= 3;
261 
262 		sym_op->cipher.data.offset = 0;
263 	}
264 
265 	if (options->test == CPERF_TEST_TYPE_VERIFY) {
266 		for (i = 0; i < nb_ops; i++) {
267 			uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
268 					uint8_t *, iv_offset);
269 
270 			memcpy(iv_ptr, test_vector->cipher_iv.data,
271 					test_vector->cipher_iv.length);
272 
273 		}
274 	}
275 
276 	return 0;
277 }
278 
/*
 * Fill a burst of ops for an auth-only test.
 *
 * Per op: attach the session, wire src/dst mbufs (dst == NULL when
 * in-place), copy the auth IV (if the vector has one), place the digest
 * and set the auth data length/offset.  For GENERATE, the digest is
 * written after the payload inside the (possibly segmented) mbuf chain;
 * for VERIFY it points at the vector's reference digest.  Wireless
 * algorithms express auth length in bits.  Always returns 0.
 */
static int
cperf_set_ops_auth(struct rte_crypto_op **ops,
		uint32_t src_buf_offset, uint32_t dst_buf_offset,
		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset, uint32_t *imix_idx,
		uint64_t *tsc_start __rte_unused)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_crypto_op_attach_sym_session(ops[i], sess);

		/* src mbuf sits at a fixed offset behind the op */
		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		/* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
		if (dst_buf_offset == 0)
			sym_op->m_dst = NULL;
		else
			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
							dst_buf_offset);

		/* copy auth IV into the op's private area, when present */
		if (test_vector->auth_iv.length) {
			uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
								uint8_t *,
								iv_offset);
			memcpy(iv_ptr, test_vector->auth_iv.data,
					test_vector->auth_iv.length);
		}

		/* authentication parameters */
		if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
			/* VERIFY compares against the vector's digest */
			sym_op->auth.digest.data = test_vector->digest.data;
			sym_op->auth.digest.phys_addr =
					test_vector->digest.phys_addr;
		} else {

			/* GENERATE: digest goes right after the payload */
			uint32_t offset = options->test_buffer_size;
			struct rte_mbuf *buf, *tbuf;

			if (options->out_of_place) {
				buf = sym_op->m_dst;
			} else {
				/* walk the chain to the segment holding
				 * byte 'offset'
				 */
				tbuf = sym_op->m_src;
				while ((tbuf->next != NULL) &&
						(offset >= tbuf->data_len)) {
					offset -= tbuf->data_len;
					tbuf = tbuf->next;
				}
				/*
				 * If there is not enough room in segment,
				 * place the digest in the next segment
				 */
				if ((tbuf->data_len - offset) < options->digest_sz) {
					tbuf = tbuf->next;
					offset = 0;
				}
				buf = tbuf;
			}

			sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
					uint8_t *, offset);
			sym_op->auth.digest.phys_addr =
					rte_pktmbuf_iova_offset(buf, offset);

		}

		/* per-packet auth length when an imix distribution is set */
		if (options->imix_distribution_count) {
			sym_op->auth.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->auth.data.length = options->test_buffer_size;

		/* wireless algos take the auth length in bits */
		if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
				options->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 ||
				options->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3)
			sym_op->auth.data.length <<= 3;

		sym_op->auth.data.offset = 0;
	}

	/* NOTE(review): this re-copies the auth IV already written in the
	 * main loop above — appears redundant but harmless; confirm before
	 * removing.
	 */
	if (options->test == CPERF_TEST_TYPE_VERIFY) {
		if (test_vector->auth_iv.length) {
			for (i = 0; i < nb_ops; i++) {
				uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
						uint8_t *, iv_offset);

				memcpy(iv_ptr, test_vector->auth_iv.data,
						test_vector->auth_iv.length);
			}
		}
	}
	return 0;
}
379 
/*
 * Fill a burst of ops for a combined cipher+auth test.
 *
 * Per op: attach the session, wire src/dst mbufs, set cipher
 * length/offset, place the digest (after the payload for GENERATE,
 * vector digest for VERIFY), then set auth length/offset.  Wireless
 * algorithms take their lengths in bits.  In VERIFY mode the cipher IV
 * (and auth IV, if any) are copied into each op.  Always returns 0.
 */
static int
cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
		uint32_t src_buf_offset, uint32_t dst_buf_offset,
		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset, uint32_t *imix_idx,
		uint64_t *tsc_start __rte_unused)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_crypto_op_attach_sym_session(ops[i], sess);

		/* src mbuf sits at a fixed offset behind the op */
		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		/* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
		if (dst_buf_offset == 0)
			sym_op->m_dst = NULL;
		else
			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
							dst_buf_offset);

		/* cipher parameters */
		if (options->imix_distribution_count) {
			sym_op->cipher.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->cipher.data.length = options->test_buffer_size;

		/* wireless algos take the cipher length in bits */
		if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
				options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
				options->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3)
			sym_op->cipher.data.length <<= 3;

		sym_op->cipher.data.offset = 0;

		/* authentication parameters */
		if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
			/* VERIFY compares against the vector's digest */
			sym_op->auth.digest.data = test_vector->digest.data;
			sym_op->auth.digest.phys_addr =
					test_vector->digest.phys_addr;
		} else {

			/* GENERATE: digest goes right after the payload */
			uint32_t offset = options->test_buffer_size;
			struct rte_mbuf *buf, *tbuf;

			if (options->out_of_place) {
				buf = sym_op->m_dst;
			} else {
				/* walk the chain to the segment holding
				 * byte 'offset'
				 */
				tbuf = sym_op->m_src;
				while ((tbuf->next != NULL) &&
						(offset >= tbuf->data_len)) {
					offset -= tbuf->data_len;
					tbuf = tbuf->next;
				}
				/*
				 * If there is not enough room in segment,
				 * place the digest in the next segment
				 */
				if ((tbuf->data_len - offset) < options->digest_sz) {
					tbuf = tbuf->next;
					offset = 0;
				}
				buf = tbuf;
			}

			sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
					uint8_t *, offset);
			sym_op->auth.digest.phys_addr =
					rte_pktmbuf_iova_offset(buf, offset);
		}

		/* NOTE(review): *imix_idx advances again here, so cipher and
		 * auth lengths of the same op come from consecutive imix
		 * entries and may differ — confirm this is intended.
		 */
		if (options->imix_distribution_count) {
			sym_op->auth.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->auth.data.length = options->test_buffer_size;

		/* wireless algos take the auth length in bits */
		if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
				options->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 ||
				options->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3)
			sym_op->auth.data.length <<= 3;

		sym_op->auth.data.offset = 0;
	}

	/* VERIFY runs need the known IVs so output can be checked */
	if (options->test == CPERF_TEST_TYPE_VERIFY) {
		for (i = 0; i < nb_ops; i++) {
			uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
					uint8_t *, iv_offset);

			memcpy(iv_ptr, test_vector->cipher_iv.data,
					test_vector->cipher_iv.length);
			if (test_vector->auth_iv.length) {
				/*
				 * Copy IV after the crypto operation and
				 * the cipher IV
				 */
				iv_ptr += test_vector->cipher_iv.length;
				memcpy(iv_ptr, test_vector->auth_iv.data,
						test_vector->auth_iv.length);
			}
		}

	}

	return 0;
}
495 
/*
 * Fill a burst of ops for an AEAD test.
 *
 * Per op: attach the session, wire src/dst mbufs, set AEAD data
 * length/offset, point the AAD at its slot in the op private area
 * (after the 16-byte-aligned IV), and place the digest (after the
 * payload for ENCRYPT, vector digest for DECRYPT).  In VERIFY mode the
 * IV and AAD are copied in, with special offsets for AES-CCM.  Always
 * returns 0.
 */
static int
cperf_set_ops_aead(struct rte_crypto_op **ops,
		uint32_t src_buf_offset, uint32_t dst_buf_offset,
		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset, uint32_t *imix_idx,
		uint64_t *tsc_start __rte_unused)
{
	uint16_t i;
	/* AAD is placed after the IV */
	uint16_t aad_offset = iv_offset +
			RTE_ALIGN_CEIL(test_vector->aead_iv.length, 16);

	for (i = 0; i < nb_ops; i++) {
		struct rte_crypto_sym_op *sym_op = ops[i]->sym;

		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		rte_crypto_op_attach_sym_session(ops[i], sess);

		/* src mbuf sits at a fixed offset behind the op */
		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
							src_buf_offset);

		/* Set dest mbuf to NULL if out-of-place (dst_buf_offset = 0) */
		if (dst_buf_offset == 0)
			sym_op->m_dst = NULL;
		else
			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
							dst_buf_offset);

		/* AEAD parameters */
		if (options->imix_distribution_count) {
			sym_op->aead.data.length =
				options->imix_buffer_sizes[*imix_idx];
			*imix_idx = (*imix_idx + 1) % options->pool_sz;
		} else
			sym_op->aead.data.length = options->test_buffer_size;
		sym_op->aead.data.offset = 0;

		/* AAD lives in the op private area at aad_offset */
		sym_op->aead.aad.data = rte_crypto_op_ctod_offset(ops[i],
					uint8_t *, aad_offset);
		sym_op->aead.aad.phys_addr = rte_crypto_op_ctophys_offset(ops[i],
					aad_offset);

		if (options->aead_op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
			/* DECRYPT verifies against the vector's digest */
			sym_op->aead.digest.data = test_vector->digest.data;
			sym_op->aead.digest.phys_addr =
					test_vector->digest.phys_addr;
		} else {

			/* ENCRYPT: digest goes right after the AEAD data */
			uint32_t offset = sym_op->aead.data.length +
						sym_op->aead.data.offset;
			struct rte_mbuf *buf, *tbuf;

			if (options->out_of_place) {
				buf = sym_op->m_dst;
			} else {
				/* walk the chain to the segment holding
				 * byte 'offset'
				 */
				tbuf = sym_op->m_src;
				while ((tbuf->next != NULL) &&
						(offset >= tbuf->data_len)) {
					offset -= tbuf->data_len;
					tbuf = tbuf->next;
				}
				/*
				 * If there is not enough room in segment,
				 * place the digest in the next segment
				 */
				if ((tbuf->data_len - offset) < options->digest_sz) {
					tbuf = tbuf->next;
					offset = 0;
				}
				buf = tbuf;
			}

			sym_op->aead.digest.data = rte_pktmbuf_mtod_offset(buf,
					uint8_t *, offset);
			sym_op->aead.digest.phys_addr =
					rte_pktmbuf_iova_offset(buf, offset);
		}
	}

	/* VERIFY runs need the known IV/AAD so output can be checked */
	if (options->test == CPERF_TEST_TYPE_VERIFY) {
		for (i = 0; i < nb_ops; i++) {
			uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
					uint8_t *, iv_offset);

			/*
			 * If doing AES-CCM, nonce is copied one byte
			 * after the start of IV field, and AAD is copied
			 * 18 bytes after the start of the AAD field.
			 */
			if (options->aead_algo == RTE_CRYPTO_AEAD_AES_CCM) {
				memcpy(iv_ptr + 1, test_vector->aead_iv.data,
					test_vector->aead_iv.length);

				memcpy(ops[i]->sym->aead.aad.data + 18,
					test_vector->aad.data,
					test_vector->aad.length);
			} else {
				memcpy(iv_ptr, test_vector->aead_iv.data,
					test_vector->aead_iv.length);

				memcpy(ops[i]->sym->aead.aad.data,
					test_vector->aad.data,
					test_vector->aad.length);
			}
		}
	}

	return 0;
}
607 
/*
 * Build and create a lookaside-protocol IPsec security session.
 *
 * Selects the crypto transform from the options: AEAD alone, or a
 * cipher+auth chain; returns NULL when neither combination is
 * configured.  Direction (egress/ingress) follows the encrypt/generate
 * settings; tunnel mode with a fixed IPv4 tunnel is used.  The lcore id
 * is used as SPI so each core gets a distinct session.
 *
 * Returns the security session cast to the generic sym-session pointer
 * used throughout this file, or NULL on failure.
 */
static struct rte_cryptodev_sym_session *
create_ipsec_session(struct rte_mempool *sess_mp,
		struct rte_mempool *priv_mp,
		uint8_t dev_id,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		uint16_t iv_offset)
{
	struct rte_crypto_sym_xform xform = {0};
	struct rte_crypto_sym_xform auth_xform = {0};

	if (options->aead_algo != 0) {
		/* Setup AEAD Parameters */
		xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
		xform.next = NULL;
		xform.aead.algo = options->aead_algo;
		xform.aead.op = options->aead_op;
		xform.aead.iv.offset = iv_offset;
		xform.aead.key.data = test_vector->aead_key.data;
		xform.aead.key.length = test_vector->aead_key.length;
		xform.aead.iv.length = test_vector->aead_iv.length;
		xform.aead.digest_length = options->digest_sz;
		xform.aead.aad_length = options->aead_aad_sz;
	} else if (options->cipher_algo != 0 && options->auth_algo != 0) {
		/* Setup Cipher Parameters */
		xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		xform.next = NULL;
		xform.cipher.algo = options->cipher_algo;
		xform.cipher.op = options->cipher_op;
		xform.cipher.iv.offset = iv_offset;
		xform.cipher.iv.length = test_vector->cipher_iv.length;
		/* cipher different than null */
		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			xform.cipher.key.data = test_vector->cipher_key.data;
			xform.cipher.key.length =
				test_vector->cipher_key.length;
		} else {
			xform.cipher.key.data = NULL;
			xform.cipher.key.length = 0;
		}

		/* Setup Auth Parameters */
		auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
		auth_xform.next = NULL;
		auth_xform.auth.algo = options->auth_algo;
		auth_xform.auth.op = options->auth_op;
		/* auth IV follows the cipher IV in the op private area */
		auth_xform.auth.iv.offset = iv_offset +
				xform.cipher.iv.length;
		/* auth different than null */
		if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			auth_xform.auth.digest_length = options->digest_sz;
			auth_xform.auth.key.length =
						test_vector->auth_key.length;
			auth_xform.auth.key.data = test_vector->auth_key.data;
			auth_xform.auth.iv.length = test_vector->auth_iv.length;
		} else {
			auth_xform.auth.digest_length = 0;
			auth_xform.auth.key.length = 0;
			auth_xform.auth.key.data = NULL;
			auth_xform.auth.iv.length = 0;
		}

		/* chain: cipher then auth */
		xform.next = &auth_xform;
	} else {
		/* neither AEAD nor cipher+auth configured */
		return NULL;
	}

#define CPERF_IPSEC_SRC_IP	0x01010101
#define CPERF_IPSEC_DST_IP	0x02020202
#define CPERF_IPSEC_SALT	0x0
#define CPERF_IPSEC_DEFTTL	64
	struct rte_security_ipsec_tunnel_param tunnel = {
		.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4,
		{.ipv4 = {
			.src_ip = { .s_addr = CPERF_IPSEC_SRC_IP},
			.dst_ip = { .s_addr = CPERF_IPSEC_DST_IP},
			.dscp = 0,
			.df = 0,
			.ttl = CPERF_IPSEC_DEFTTL,
		} },
	};
	struct rte_security_session_conf sess_conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		{.ipsec = {
			.spi = rte_lcore_id(),
			/**< For testing sake, lcore_id is taken as SPI so that
			 * for every core a different session is created.
			 */
			.salt = CPERF_IPSEC_SALT,
			.options = { 0 },
			.replay_win_sz = 0,
			/* egress when encrypting/generating, else ingress */
			.direction =
				((options->cipher_op ==
					RTE_CRYPTO_CIPHER_OP_ENCRYPT) &&
				(options->auth_op ==
					RTE_CRYPTO_AUTH_OP_GENERATE)) ||
				(options->aead_op ==
					RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
				RTE_SECURITY_IPSEC_SA_DIR_EGRESS :
				RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.tunnel = tunnel,
		} },
		.userdata = NULL,
		.crypto_xform = &xform
	};

	struct rte_security_ctx *ctx = (struct rte_security_ctx *)
				rte_cryptodev_get_sec_ctx(dev_id);

	/* Create security session */
	return (void *)rte_security_session_create(ctx,
				&sess_conf, sess_mp, priv_mp);
}
724 
/*
 * Create the session appropriate for the configured op type.
 *
 * Dispatches on options->op_type:
 *  - CPERF_ASYM_MODEX: asym modexp session (returned via the generic
 *    sym-session pointer).
 *  - CPERF_PDCP / CPERF_IPSEC / CPERF_DOCSIS (RTE_LIB_SECURITY only):
 *    security sessions, likewise cast to the generic pointer.
 *  - otherwise: a symmetric session initialized with the cipher/auth/
 *    AEAD transform(s) built from options and test_vector.
 *
 * iv_offset is where IVs live in each op's private area; chained auth
 * IVs are placed right after the cipher IV.  Returns the session or
 * NULL on failure.
 */
static struct rte_cryptodev_sym_session *
cperf_create_session(struct rte_mempool *sess_mp,
	struct rte_mempool *priv_mp,
	uint8_t dev_id,
	const struct cperf_options *options,
	const struct cperf_test_vector *test_vector,
	uint16_t iv_offset)
{
	struct rte_crypto_sym_xform cipher_xform;
	struct rte_crypto_sym_xform auth_xform;
	struct rte_crypto_sym_xform aead_xform;
	struct rte_cryptodev_sym_session *sess = NULL;
	void *asym_sess = NULL;
	struct rte_crypto_asym_xform xform = {0};
	int ret;

	if (options->op_type == CPERF_ASYM_MODEX) {
		/* modexp transform uses the shared perf modulus/exponent */
		xform.next = NULL;
		xform.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX;
		xform.modex.modulus.data = perf_mod_p;
		xform.modex.modulus.length = sizeof(perf_mod_p);
		xform.modex.exponent.data = perf_mod_e;
		xform.modex.exponent.length = sizeof(perf_mod_e);

		ret = rte_cryptodev_asym_session_create(dev_id, &xform,
				sess_mp, &asym_sess);
		if (ret < 0) {
			RTE_LOG(ERR, USER1, "Asym session create failed\n");
			return NULL;
		}
		return asym_sess;
	}
#ifdef RTE_LIB_SECURITY
	/*
	 * security only
	 */
	if (options->op_type == CPERF_PDCP) {
		/* Setup Cipher Parameters */
		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		cipher_xform.next = NULL;
		cipher_xform.cipher.algo = options->cipher_algo;
		cipher_xform.cipher.op = options->cipher_op;
		cipher_xform.cipher.iv.offset = iv_offset;
		/* PDCP uses a fixed 4-byte IV (count) */
		cipher_xform.cipher.iv.length = 4;

		/* cipher different than null */
		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			cipher_xform.cipher.key.data = test_vector->cipher_key.data;
			cipher_xform.cipher.key.length = test_vector->cipher_key.length;
		} else {
			cipher_xform.cipher.key.data = NULL;
			cipher_xform.cipher.key.length = 0;
		}

		/* Setup Auth Parameters */
		if (options->auth_algo != 0) {
			auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
			auth_xform.next = NULL;
			auth_xform.auth.algo = options->auth_algo;
			auth_xform.auth.op = options->auth_op;
			/* auth IV follows the cipher IV */
			auth_xform.auth.iv.offset = iv_offset +
				cipher_xform.cipher.iv.length;

			/* auth different than null */
			if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
				auth_xform.auth.digest_length = options->digest_sz;
				auth_xform.auth.key.length = test_vector->auth_key.length;
				auth_xform.auth.key.data = test_vector->auth_key.data;
				auth_xform.auth.iv.length = test_vector->auth_iv.length;
			} else {
				auth_xform.auth.digest_length = 0;
				auth_xform.auth.key.length = 0;
				auth_xform.auth.key.data = NULL;
				auth_xform.auth.iv.length = 0;
			}

			cipher_xform.next = &auth_xform;
		} else {
			cipher_xform.next = NULL;
		}

		struct rte_security_session_conf sess_conf = {
			.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
			.protocol = RTE_SECURITY_PROTOCOL_PDCP,
			{.pdcp = {
				.bearer = 0x16,
				.domain = options->pdcp_domain,
				.pkt_dir = 0,
				.sn_size = options->pdcp_sn_sz,
				.hfn = options->pdcp_ses_hfn_en ?
					PDCP_DEFAULT_HFN : 0,
				.hfn_threshold = 0x70C0A,
				/* per-packet HFN override when the session
				 * HFN is disabled
				 */
				.hfn_ovrd = !(options->pdcp_ses_hfn_en),
			} },
			.crypto_xform = &cipher_xform
		};

		struct rte_security_ctx *ctx = (struct rte_security_ctx *)
					rte_cryptodev_get_sec_ctx(dev_id);

		/* Create security session */
		return (void *)rte_security_session_create(ctx,
					&sess_conf, sess_mp, priv_mp);
	}

	if (options->op_type == CPERF_IPSEC) {
		return create_ipsec_session(sess_mp, priv_mp, dev_id,
				options, test_vector, iv_offset);
	}

	if (options->op_type == CPERF_DOCSIS) {
		enum rte_security_docsis_direction direction;

		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		cipher_xform.next = NULL;
		cipher_xform.cipher.algo = options->cipher_algo;
		cipher_xform.cipher.op = options->cipher_op;
		cipher_xform.cipher.iv.offset = iv_offset;
		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			cipher_xform.cipher.key.data =
				test_vector->cipher_key.data;
			cipher_xform.cipher.key.length =
				test_vector->cipher_key.length;
			cipher_xform.cipher.iv.length =
				test_vector->cipher_iv.length;
		} else {
			cipher_xform.cipher.key.data = NULL;
			cipher_xform.cipher.key.length = 0;
			cipher_xform.cipher.iv.length = 0;
		}
		cipher_xform.next = NULL;

		/* encrypt == downlink (CMTS->modem), decrypt == uplink */
		if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
			direction = RTE_SECURITY_DOCSIS_DOWNLINK;
		else
			direction = RTE_SECURITY_DOCSIS_UPLINK;

		struct rte_security_session_conf sess_conf = {
			.action_type =
				RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
			.protocol = RTE_SECURITY_PROTOCOL_DOCSIS,
			{.docsis = {
				.direction = direction,
			} },
			.crypto_xform = &cipher_xform
		};
		struct rte_security_ctx *ctx = (struct rte_security_ctx *)
					rte_cryptodev_get_sec_ctx(dev_id);

		/* Create security session */
		return (void *)rte_security_session_create(ctx,
					&sess_conf, sess_mp, priv_mp);
	}
#endif
	sess = rte_cryptodev_sym_session_create(sess_mp);
	/*
	 * cipher only
	 */
	if (options->op_type == CPERF_CIPHER_ONLY) {
		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		cipher_xform.next = NULL;
		cipher_xform.cipher.algo = options->cipher_algo;
		cipher_xform.cipher.op = options->cipher_op;
		cipher_xform.cipher.iv.offset = iv_offset;

		/* cipher different than null */
		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			cipher_xform.cipher.key.data =
					test_vector->cipher_key.data;
			cipher_xform.cipher.key.length =
					test_vector->cipher_key.length;
			cipher_xform.cipher.iv.length =
					test_vector->cipher_iv.length;
		} else {
			cipher_xform.cipher.key.data = NULL;
			cipher_xform.cipher.key.length = 0;
			cipher_xform.cipher.iv.length = 0;
		}
		/* create crypto session */
		rte_cryptodev_sym_session_init(dev_id, sess, &cipher_xform,
				priv_mp);
	/*
	 *  auth only
	 */
	} else if (options->op_type == CPERF_AUTH_ONLY) {
		auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
		auth_xform.next = NULL;
		auth_xform.auth.algo = options->auth_algo;
		auth_xform.auth.op = options->auth_op;
		auth_xform.auth.iv.offset = iv_offset;

		/* auth different than null */
		if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			auth_xform.auth.digest_length =
					options->digest_sz;
			auth_xform.auth.key.length =
					test_vector->auth_key.length;
			auth_xform.auth.key.data = test_vector->auth_key.data;
			auth_xform.auth.iv.length =
					test_vector->auth_iv.length;
		} else {
			auth_xform.auth.digest_length = 0;
			auth_xform.auth.key.length = 0;
			auth_xform.auth.key.data = NULL;
			auth_xform.auth.iv.length = 0;
		}
		/* create crypto session */
		rte_cryptodev_sym_session_init(dev_id, sess, &auth_xform,
				priv_mp);
	/*
	 * cipher and auth
	 */
	} else if (options->op_type == CPERF_CIPHER_THEN_AUTH
			|| options->op_type == CPERF_AUTH_THEN_CIPHER) {
		/*
		 * cipher
		 */
		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		cipher_xform.next = NULL;
		cipher_xform.cipher.algo = options->cipher_algo;
		cipher_xform.cipher.op = options->cipher_op;
		cipher_xform.cipher.iv.offset = iv_offset;

		/* cipher different than null */
		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			cipher_xform.cipher.key.data =
					test_vector->cipher_key.data;
			cipher_xform.cipher.key.length =
					test_vector->cipher_key.length;
			cipher_xform.cipher.iv.length =
					test_vector->cipher_iv.length;
		} else {
			cipher_xform.cipher.key.data = NULL;
			cipher_xform.cipher.key.length = 0;
			cipher_xform.cipher.iv.length = 0;
		}

		/*
		 * auth
		 */
		auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
		auth_xform.next = NULL;
		auth_xform.auth.algo = options->auth_algo;
		auth_xform.auth.op = options->auth_op;
		/* auth IV follows the cipher IV */
		auth_xform.auth.iv.offset = iv_offset +
			cipher_xform.cipher.iv.length;

		/* auth different than null */
		if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			auth_xform.auth.digest_length = options->digest_sz;
			auth_xform.auth.iv.length = test_vector->auth_iv.length;
			auth_xform.auth.key.length =
					test_vector->auth_key.length;
			auth_xform.auth.key.data =
					test_vector->auth_key.data;
		} else {
			auth_xform.auth.digest_length = 0;
			auth_xform.auth.key.length = 0;
			auth_xform.auth.key.data = NULL;
			auth_xform.auth.iv.length = 0;
		}

		/* cipher then auth */
		if (options->op_type == CPERF_CIPHER_THEN_AUTH) {
			cipher_xform.next = &auth_xform;
			/* create crypto session */
			rte_cryptodev_sym_session_init(dev_id,
					sess, &cipher_xform, priv_mp);
		} else { /* auth then cipher */
			auth_xform.next = &cipher_xform;
			/* create crypto session */
			rte_cryptodev_sym_session_init(dev_id,
					sess, &auth_xform, priv_mp);
		}
	} else { /* options->op_type == CPERF_AEAD */
		aead_xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
		aead_xform.next = NULL;
		aead_xform.aead.algo = options->aead_algo;
		aead_xform.aead.op = options->aead_op;
		aead_xform.aead.iv.offset = iv_offset;

		aead_xform.aead.key.data =
					test_vector->aead_key.data;
		aead_xform.aead.key.length =
					test_vector->aead_key.length;
		aead_xform.aead.iv.length = test_vector->aead_iv.length;

		aead_xform.aead.digest_length = options->digest_sz;
		aead_xform.aead.aad_length =
					options->aead_aad_sz;

		/* Create crypto session */
		rte_cryptodev_sym_session_init(dev_id,
					sess, &aead_xform, priv_mp);
	}

	return sess;
}
1023 
1024 int
1025 cperf_get_op_functions(const struct cperf_options *options,
1026 		struct cperf_op_fns *op_fns)
1027 {
1028 	memset(op_fns, 0, sizeof(struct cperf_op_fns));
1029 
1030 	op_fns->sess_create = cperf_create_session;
1031 
1032 	switch (options->op_type) {
1033 	case CPERF_AEAD:
1034 		op_fns->populate_ops = cperf_set_ops_aead;
1035 		break;
1036 
1037 	case CPERF_AUTH_THEN_CIPHER:
1038 	case CPERF_CIPHER_THEN_AUTH:
1039 		op_fns->populate_ops = cperf_set_ops_cipher_auth;
1040 		break;
1041 	case CPERF_AUTH_ONLY:
1042 		if (options->auth_algo == RTE_CRYPTO_AUTH_NULL)
1043 			op_fns->populate_ops = cperf_set_ops_null_auth;
1044 		else
1045 			op_fns->populate_ops = cperf_set_ops_auth;
1046 		break;
1047 	case CPERF_CIPHER_ONLY:
1048 		if (options->cipher_algo == RTE_CRYPTO_CIPHER_NULL)
1049 			op_fns->populate_ops = cperf_set_ops_null_cipher;
1050 		else
1051 			op_fns->populate_ops = cperf_set_ops_cipher;
1052 		break;
1053 	case CPERF_ASYM_MODEX:
1054 		op_fns->populate_ops = cperf_set_ops_asym;
1055 		break;
1056 #ifdef RTE_LIB_SECURITY
1057 	case CPERF_PDCP:
1058 	case CPERF_IPSEC:
1059 	case CPERF_DOCSIS:
1060 		op_fns->populate_ops = cperf_set_ops_security;
1061 		break;
1062 #endif
1063 	default:
1064 		return -1;
1065 	}
1066 
1067 	return 0;
1068 }
1069