1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2017 Intel Corporation
3  */
4 
5 #include <rte_cryptodev.h>
6 #include <rte_ether.h>
7 #include <rte_ip.h>
8 
9 #include "cperf_ops.h"
10 #include "cperf_test_vectors.h"
11 
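/*
 * Asymmetric (modular exponentiation) ops: each op computes
 * perf_base ^ perf_mod_e mod perf_mod_p (test vectors declared in
 * cperf_test_vectors.h); the generic sym-session pointer is reused
 * to carry the asymmetric session.
 */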
12 static int
13 cperf_set_ops_asym(struct rte_crypto_op **ops,
14 		   uint32_t src_buf_offset __rte_unused,
15 		   uint32_t dst_buf_offset __rte_unused, uint16_t nb_ops,
16 		   struct rte_cryptodev_sym_session *sess,
17 		   const struct cperf_options *options __rte_unused,
18 		   const struct cperf_test_vector *test_vector __rte_unused,
19 		   uint16_t iv_offset __rte_unused,
20 		   uint32_t *imix_idx __rte_unused,
21 		   uint64_t *tsc_start __rte_unused)
22 {
23 	uint16_t i;
24 	static uint8_t result[sizeof(perf_mod_p)] = { 0 }; /* static: the PMD fills this after we return */
25 	struct rte_cryptodev_asym_session *asym_sess = (void *)sess;
26 
27 	for (i = 0; i < nb_ops; i++) {
28 		struct rte_crypto_asym_op *asym_op = ops[i]->asym;
29 
30 		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
31 		asym_op->modex.base.data = perf_base;
32 		asym_op->modex.base.length = sizeof(perf_base);
33 		asym_op->modex.result.data = result;
34 		asym_op->modex.result.length = sizeof(result);
35 		rte_crypto_op_attach_asym_session(ops[i], asym_sess);
36 	}
37 	return 0;
38 }
39 
40 #ifdef RTE_LIB_SECURITY
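/*
 * For the encrypt direction, (re)seed the mbuf with the plaintext test
 * vector so every enqueue starts from a known payload, and patch the IPv4
 * total length to match the mbuf data length.
 */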
41 static void
42 test_ipsec_vec_populate(struct rte_mbuf *m, const struct cperf_options *options,
43 			const struct cperf_test_vector *test_vector)
44 {
45 	struct rte_ipv4_hdr *ip = rte_pktmbuf_mtod(m, struct rte_ipv4_hdr *);
46 
47 	if ((options->aead_op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ||
48 		(options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)) {
49 		memcpy(ip, test_vector->plaintext.data, m->data_len);
50 
51 		ip->total_length = rte_cpu_to_be_16(m->data_len);
52 	}
53 }
54 
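/*
 * Note on buffer layout (a sketch; the exact layout is set up by the
 * cperf test runners): each crypto op and its mbufs are carved from a
 * single mempool element, so the source/destination mbufs sit at fixed
 * byte offsets from the op pointer:
 *
 *   rte_crypto_op | sym/asym op | IV, AAD ... | src mbuf | [dst mbuf]
 *   ^-- src_buf_offset / dst_buf_offset are measured from here
 */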
55 static int
56 cperf_set_ops_security(struct rte_crypto_op **ops,
57 		uint32_t src_buf_offset,
58 		uint32_t dst_buf_offset,
59 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
60 		const struct cperf_options *options,
61 		const struct cperf_test_vector *test_vector,
62 		uint16_t iv_offset, uint32_t *imix_idx,
63 		uint64_t *tsc_start)
64 {
65 	uint64_t tsc_start_temp, tsc_end_temp;
66 	uint16_t i;
67 
68 	for (i = 0; i < nb_ops; i++) {
69 		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
70 		struct rte_security_session *sec_sess =
71 			(struct rte_security_session *)sess;
72 		uint32_t buf_sz;
73 
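		/* The PDCP per-packet HFN lives in the op's IV area; it is
		 * written unconditionally here but only consumed for PDCP
		 * sessions.
		 */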
74 		uint32_t *per_pkt_hfn = rte_crypto_op_ctod_offset(ops[i],
75 					uint32_t *, iv_offset);
76 		*per_pkt_hfn = options->pdcp_ses_hfn_en ? 0 : PDCP_DEFAULT_HFN;
77 
78 		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
79 		rte_security_attach_session(ops[i], sec_sess);
80 		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
81 							src_buf_offset);
82 
83 		if (options->op_type == CPERF_PDCP ||
84 				options->op_type == CPERF_IPSEC) {
85 			/* In the IPsec case the headroom is consumed by the
86 			 * PMD (e.g. for tunnel headers), so reset it here.
87 			 */
88 			sym_op->m_src->data_off = options->headroom_sz;
89 
90 			sym_op->m_src->buf_len = options->segment_sz;
91 			sym_op->m_src->data_len = options->test_buffer_size;
92 			sym_op->m_src->pkt_len = sym_op->m_src->data_len;
93 
94 			if ((options->op_type == CPERF_IPSEC) &&
95 			    (options->test_file == NULL) &&
96 			    (options->test == CPERF_TEST_TYPE_THROUGHPUT)) {
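				/* Credit the vector-copy cycles to the start
				 * timestamp so that they are excluded from
				 * the throughput measurement.
				 */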
97 				tsc_start_temp = rte_rdtsc_precise();
98 				test_ipsec_vec_populate(sym_op->m_src, options,
99 							test_vector);
100 				tsc_end_temp = rte_rdtsc_precise();
101 
102 				*tsc_start += (tsc_end_temp - tsc_start_temp);
103 			}
104 		}
105 
106 		if (options->op_type == CPERF_DOCSIS) {
107 			if (options->imix_distribution_count) {
108 				buf_sz = options->imix_buffer_sizes[*imix_idx];
109 				*imix_idx = (*imix_idx + 1) % options->pool_sz;
110 			} else
111 				buf_sz = options->test_buffer_size;
112 
113 			sym_op->m_src->buf_len = options->segment_sz;
114 			sym_op->m_src->data_len = buf_sz;
115 			sym_op->m_src->pkt_len = buf_sz;
116 
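			/*
			 * E.g. assuming a 6-byte DOCSIS header and a 64-byte
			 * frame, the settings below give: auth over bytes
			 * [6, 60) (the 4-byte CRC excluded) and ciphering
			 * from offset 6 + 14 - 2 = 18, i.e. past the DOCSIS
			 * header and the DA/SA.
			 */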
117 			/* DOCSIS header is not CRC'ed */
118 			sym_op->auth.data.offset = options->docsis_hdr_sz;
119 			sym_op->auth.data.length = buf_sz -
120 				sym_op->auth.data.offset - RTE_ETHER_CRC_LEN;
121 			/*
122 			 * DOCSIS header and SRC and DST MAC addresses are not
123 			 * ciphered
124 			 */
125 			sym_op->cipher.data.offset = sym_op->auth.data.offset +
126 				RTE_ETHER_HDR_LEN - RTE_ETHER_TYPE_LEN;
127 			sym_op->cipher.data.length = buf_sz -
128 				sym_op->cipher.data.offset;
129 		}
130 
131 		/* Set dest mbuf to NULL if in-place (dst_buf_offset == 0) */
132 		if (dst_buf_offset == 0)
133 			sym_op->m_dst = NULL;
134 		else
135 			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
136 							dst_buf_offset);
137 	}
138 
139 	return 0;
140 }
141 #endif
142 
143 static int
144 cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
145 		uint32_t src_buf_offset, uint32_t dst_buf_offset,
146 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
147 		const struct cperf_options *options,
148 		const struct cperf_test_vector *test_vector __rte_unused,
149 		uint16_t iv_offset __rte_unused, uint32_t *imix_idx,
150 		uint64_t *tsc_start __rte_unused)
151 {
152 	uint16_t i;
153 
154 	for (i = 0; i < nb_ops; i++) {
155 		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
156 
157 		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
158 		rte_crypto_op_attach_sym_session(ops[i], sess);
159 
160 		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
161 							src_buf_offset);
162 
163 		/* Set dest mbuf to NULL if in-place (dst_buf_offset == 0) */
164 		if (dst_buf_offset == 0)
165 			sym_op->m_dst = NULL;
166 		else
167 			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
168 							dst_buf_offset);
169 
170 		/* cipher parameters */
171 		if (options->imix_distribution_count) {
172 			sym_op->cipher.data.length =
173 				options->imix_buffer_sizes[*imix_idx];
174 			*imix_idx = (*imix_idx + 1) % options->pool_sz;
175 		} else
176 			sym_op->cipher.data.length = options->test_buffer_size;
177 		sym_op->cipher.data.offset = 0;
178 	}
179 
180 	return 0;
181 }
182 
183 static int
184 cperf_set_ops_null_auth(struct rte_crypto_op **ops,
185 		uint32_t src_buf_offset, uint32_t dst_buf_offset,
186 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
187 		const struct cperf_options *options,
188 		const struct cperf_test_vector *test_vector __rte_unused,
189 		uint16_t iv_offset __rte_unused, uint32_t *imix_idx,
190 		uint64_t *tsc_start __rte_unused)
191 {
192 	uint16_t i;
193 
194 	for (i = 0; i < nb_ops; i++) {
195 		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
196 
197 		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
198 		rte_crypto_op_attach_sym_session(ops[i], sess);
199 
200 		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
201 							src_buf_offset);
202 
203 		/* Set dest mbuf to NULL if in-place (dst_buf_offset == 0) */
204 		if (dst_buf_offset == 0)
205 			sym_op->m_dst = NULL;
206 		else
207 			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
208 							dst_buf_offset);
209 
210 		/* auth parameters */
211 		if (options->imix_distribution_count) {
212 			sym_op->auth.data.length =
213 				options->imix_buffer_sizes[*imix_idx];
214 			*imix_idx = (*imix_idx + 1) % options->pool_sz;
215 		} else
216 			sym_op->auth.data.length = options->test_buffer_size;
217 		sym_op->auth.data.offset = 0;
218 	}
219 
220 	return 0;
221 }
222 
223 static int
224 cperf_set_ops_cipher(struct rte_crypto_op **ops,
225 		uint32_t src_buf_offset, uint32_t dst_buf_offset,
226 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
227 		const struct cperf_options *options,
228 		const struct cperf_test_vector *test_vector,
229 		uint16_t iv_offset, uint32_t *imix_idx,
230 		uint64_t *tsc_start __rte_unused)
231 {
232 	uint16_t i;
233 
234 	for (i = 0; i < nb_ops; i++) {
235 		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
236 
237 		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
238 		rte_crypto_op_attach_sym_session(ops[i], sess);
239 
240 		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
241 							src_buf_offset);
242 
243 		/* Set dest mbuf to NULL if in-place (dst_buf_offset == 0) */
244 		if (dst_buf_offset == 0)
245 			sym_op->m_dst = NULL;
246 		else
247 			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
248 							dst_buf_offset);
249 
250 		/* cipher parameters */
251 		if (options->imix_distribution_count) {
252 			sym_op->cipher.data.length =
253 				options->imix_buffer_sizes[*imix_idx];
254 			*imix_idx = (*imix_idx + 1) % options->pool_sz;
255 		} else
256 			sym_op->cipher.data.length = options->test_buffer_size;
257 
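		/* These wireless algorithms express lengths in bits, not bytes */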
258 		if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
259 				options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
260 				options->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3)
261 			sym_op->cipher.data.length <<= 3;
262 
263 		sym_op->cipher.data.offset = 0;
264 	}
265 
266 	if (options->test == CPERF_TEST_TYPE_VERIFY) {
267 		for (i = 0; i < nb_ops; i++) {
268 			uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
269 					uint8_t *, iv_offset);
270 
271 			memcpy(iv_ptr, test_vector->cipher_iv.data,
272 					test_vector->cipher_iv.length);
273 
274 		}
275 	}
276 
277 	return 0;
278 }
279 
280 static int
281 cperf_set_ops_auth(struct rte_crypto_op **ops,
282 		uint32_t src_buf_offset, uint32_t dst_buf_offset,
283 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
284 		const struct cperf_options *options,
285 		const struct cperf_test_vector *test_vector,
286 		uint16_t iv_offset, uint32_t *imix_idx,
287 		uint64_t *tsc_start __rte_unused)
288 {
289 	uint16_t i;
290 
291 	for (i = 0; i < nb_ops; i++) {
292 		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
293 
294 		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
295 		rte_crypto_op_attach_sym_session(ops[i], sess);
296 
297 		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
298 							src_buf_offset);
299 
300 		/* Set dest mbuf to NULL if in-place (dst_buf_offset == 0) */
301 		if (dst_buf_offset == 0)
302 			sym_op->m_dst = NULL;
303 		else
304 			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
305 							dst_buf_offset);
306 
307 		if (test_vector->auth_iv.length) {
308 			uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
309 								uint8_t *,
310 								iv_offset);
311 			memcpy(iv_ptr, test_vector->auth_iv.data,
312 					test_vector->auth_iv.length);
313 		}
314 
315 		/* authentication parameters */
316 		if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
317 			sym_op->auth.digest.data = test_vector->digest.data;
318 			sym_op->auth.digest.phys_addr =
319 					test_vector->digest.phys_addr;
320 		} else {
321 
322 			uint32_t offset = options->test_buffer_size;
323 			struct rte_mbuf *buf, *tbuf;
324 
325 			if (options->out_of_place) {
326 				buf = sym_op->m_dst;
327 			} else {
328 				tbuf = sym_op->m_src;
329 				while ((tbuf->next != NULL) &&
330 						(offset >= tbuf->data_len)) {
331 					offset -= tbuf->data_len;
332 					tbuf = tbuf->next;
333 				}
334 				/*
335 				 * If there is not enough room in segment,
336 				 * place the digest in the next segment
337 				 */
338 				if ((tbuf->data_len - offset) < options->digest_sz) {
339 					tbuf = tbuf->next;
340 					offset = 0;
341 				}
342 				buf = tbuf;
343 			}
344 
345 			sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
346 					uint8_t *, offset);
347 			sym_op->auth.digest.phys_addr =
348 					rte_pktmbuf_iova_offset(buf, offset);
349 
350 		}
351 
352 		if (options->imix_distribution_count) {
353 			sym_op->auth.data.length =
354 				options->imix_buffer_sizes[*imix_idx];
355 			*imix_idx = (*imix_idx + 1) % options->pool_sz;
356 		} else
357 			sym_op->auth.data.length = options->test_buffer_size;
358 
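		/* As for cipher: SNOW3G/KASUMI/ZUC auth lengths are in bits */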
359 		if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
360 				options->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 ||
361 				options->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3)
362 			sym_op->auth.data.length <<= 3;
363 
364 		sym_op->auth.data.offset = 0;
365 	}
366 
367 	if (options->test == CPERF_TEST_TYPE_VERIFY) {
368 		if (test_vector->auth_iv.length) {
369 			for (i = 0; i < nb_ops; i++) {
370 				uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
371 						uint8_t *, iv_offset);
372 
373 				memcpy(iv_ptr, test_vector->auth_iv.data,
374 						test_vector->auth_iv.length);
375 			}
376 		}
377 	}
378 	return 0;
379 }
380 
381 static int
382 cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
383 		uint32_t src_buf_offset, uint32_t dst_buf_offset,
384 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
385 		const struct cperf_options *options,
386 		const struct cperf_test_vector *test_vector,
387 		uint16_t iv_offset, uint32_t *imix_idx,
388 		uint64_t *tsc_start __rte_unused)
389 {
390 	uint16_t i;
391 
392 	for (i = 0; i < nb_ops; i++) {
393 		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
394 
395 		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
396 		rte_crypto_op_attach_sym_session(ops[i], sess);
397 
398 		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
399 							src_buf_offset);
400 
401 		/* Set dest mbuf to NULL if in-place (dst_buf_offset == 0) */
402 		if (dst_buf_offset == 0)
403 			sym_op->m_dst = NULL;
404 		else
405 			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
406 							dst_buf_offset);
407 
408 		/* cipher parameters */
409 		if (options->imix_distribution_count) {
410 			sym_op->cipher.data.length =
411 				options->imix_buffer_sizes[*imix_idx];
412 			*imix_idx = (*imix_idx + 1) % options->pool_sz;
413 		} else
414 			sym_op->cipher.data.length = options->test_buffer_size;
415 
416 		if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
417 				options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
418 				options->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3)
419 			sym_op->cipher.data.length <<= 3;
420 
421 		sym_op->cipher.data.offset = 0;
422 
423 		/* authentication parameters */
424 		if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
425 			sym_op->auth.digest.data = test_vector->digest.data;
426 			sym_op->auth.digest.phys_addr =
427 					test_vector->digest.phys_addr;
428 		} else {
429 
430 			uint32_t offset = options->test_buffer_size;
431 			struct rte_mbuf *buf, *tbuf;
432 
433 			if (options->out_of_place) {
434 				buf = sym_op->m_dst;
435 			} else {
436 				tbuf = sym_op->m_src;
437 				while ((tbuf->next != NULL) &&
438 						(offset >= tbuf->data_len)) {
439 					offset -= tbuf->data_len;
440 					tbuf = tbuf->next;
441 				}
442 				/*
443 				 * If there is not enough room in segment,
444 				 * place the digest in the next segment
445 				 */
446 				if ((tbuf->data_len - offset) < options->digest_sz) {
447 					tbuf = tbuf->next;
448 					offset = 0;
449 				}
450 				buf = tbuf;
451 			}
452 
453 			sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
454 					uint8_t *, offset);
455 			sym_op->auth.digest.phys_addr =
456 					rte_pktmbuf_iova_offset(buf, offset);
457 		}
458 
459 		if (options->imix_distribution_count) {
460 			sym_op->auth.data.length =
461 				options->imix_buffer_sizes[*imix_idx];
462 			*imix_idx = (*imix_idx + 1) % options->pool_sz;
463 		} else
464 			sym_op->auth.data.length = options->test_buffer_size;
465 
466 		if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
467 				options->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 ||
468 				options->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3)
469 			sym_op->auth.data.length <<= 3;
470 
471 		sym_op->auth.data.offset = 0;
472 	}
473 
474 	if (options->test == CPERF_TEST_TYPE_VERIFY) {
475 		for (i = 0; i < nb_ops; i++) {
476 			uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
477 					uint8_t *, iv_offset);
478 
479 			memcpy(iv_ptr, test_vector->cipher_iv.data,
480 					test_vector->cipher_iv.length);
481 			if (test_vector->auth_iv.length) {
482 				/*
483 				 * Copy the auth IV right after the cipher IV
484 				 * (both are placed past the crypto operation)
485 				 */
486 				iv_ptr += test_vector->cipher_iv.length;
487 				memcpy(iv_ptr, test_vector->auth_iv.data,
488 						test_vector->auth_iv.length);
489 			}
490 		}
491 
492 	}
493 
494 	return 0;
495 }
496 
497 static int
498 cperf_set_ops_aead(struct rte_crypto_op **ops,
499 		uint32_t src_buf_offset, uint32_t dst_buf_offset,
500 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
501 		const struct cperf_options *options,
502 		const struct cperf_test_vector *test_vector,
503 		uint16_t iv_offset, uint32_t *imix_idx,
504 		uint64_t *tsc_start __rte_unused)
505 {
506 	uint16_t i;
507 	/* AAD is placed after the IV */
508 	uint16_t aad_offset = iv_offset +
509 			RTE_ALIGN_CEIL(test_vector->aead_iv.length, 16);
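	/* (The IV space is rounded up to 16 bytes, presumably so that the
	 * AAD starts at an aligned offset.)
	 */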
510 
511 	for (i = 0; i < nb_ops; i++) {
512 		struct rte_crypto_sym_op *sym_op = ops[i]->sym;
513 
514 		ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
515 		rte_crypto_op_attach_sym_session(ops[i], sess);
516 
517 		sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
518 							src_buf_offset);
519 
520 		/* Set dest mbuf to NULL if in-place (dst_buf_offset == 0) */
521 		if (dst_buf_offset == 0)
522 			sym_op->m_dst = NULL;
523 		else
524 			sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
525 							dst_buf_offset);
526 
527 		/* AEAD parameters */
528 		if (options->imix_distribution_count) {
529 			sym_op->aead.data.length =
530 				options->imix_buffer_sizes[*imix_idx];
531 			*imix_idx = (*imix_idx + 1) % options->pool_sz;
532 		} else
533 			sym_op->aead.data.length = options->test_buffer_size;
534 		sym_op->aead.data.offset = 0;
535 
536 		sym_op->aead.aad.data = rte_crypto_op_ctod_offset(ops[i],
537 					uint8_t *, aad_offset);
538 		sym_op->aead.aad.phys_addr = rte_crypto_op_ctophys_offset(ops[i],
539 					aad_offset);
540 
541 		if (options->aead_op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
542 			sym_op->aead.digest.data = test_vector->digest.data;
543 			sym_op->aead.digest.phys_addr =
544 					test_vector->digest.phys_addr;
545 		} else {
546 
547 			uint32_t offset = sym_op->aead.data.length +
548 						sym_op->aead.data.offset;
549 			struct rte_mbuf *buf, *tbuf;
550 
551 			if (options->out_of_place) {
552 				buf = sym_op->m_dst;
553 			} else {
554 				tbuf = sym_op->m_src;
555 				while ((tbuf->next != NULL) &&
556 						(offset >= tbuf->data_len)) {
557 					offset -= tbuf->data_len;
558 					tbuf = tbuf->next;
559 				}
560 				/*
561 				 * If there is not enough room in segment,
562 				 * place the digest in the next segment
563 				 */
564 				if ((tbuf->data_len - offset) < options->digest_sz) {
565 					tbuf = tbuf->next;
566 					offset = 0;
567 				}
568 				buf = tbuf;
569 			}
570 
571 			sym_op->aead.digest.data = rte_pktmbuf_mtod_offset(buf,
572 					uint8_t *, offset);
573 			sym_op->aead.digest.phys_addr =
574 					rte_pktmbuf_iova_offset(buf, offset);
575 		}
576 	}
577 
578 	if (options->test == CPERF_TEST_TYPE_VERIFY) {
579 		for (i = 0; i < nb_ops; i++) {
580 			uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
581 					uint8_t *, iv_offset);
582 
583 			/*
584 			 * If doing AES-CCM, nonce is copied one byte
585 			 * after the start of IV field, and AAD is copied
586 			 * 18 bytes after the start of the AAD field.
587 			 */
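			/* (For CCM, byte 0 of the IV field is reserved for
			 * the flags byte, and the first 18 AAD bytes are
			 * reserved for the B0 block and the AAD length
			 * encoding.)
			 */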
588 			if (options->aead_algo == RTE_CRYPTO_AEAD_AES_CCM) {
589 				memcpy(iv_ptr + 1, test_vector->aead_iv.data,
590 					test_vector->aead_iv.length);
591 
592 				memcpy(ops[i]->sym->aead.aad.data + 18,
593 					test_vector->aad.data,
594 					test_vector->aad.length);
595 			} else {
596 				memcpy(iv_ptr, test_vector->aead_iv.data,
597 					test_vector->aead_iv.length);
598 
599 				memcpy(ops[i]->sym->aead.aad.data,
600 					test_vector->aad.data,
601 					test_vector->aad.length);
602 			}
603 		}
604 	}
605 
606 	return 0;
607 }
608 
609 static struct rte_cryptodev_sym_session *
610 create_ipsec_session(struct rte_mempool *sess_mp,
611 		struct rte_mempool *priv_mp,
612 		uint8_t dev_id,
613 		const struct cperf_options *options,
614 		const struct cperf_test_vector *test_vector,
615 		uint16_t iv_offset)
616 {
617 	struct rte_crypto_sym_xform xform = {0};
618 	struct rte_crypto_sym_xform auth_xform = {0};
619 
620 	if (options->aead_algo != 0) {
621 		/* Setup AEAD Parameters */
622 		xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
623 		xform.next = NULL;
624 		xform.aead.algo = options->aead_algo;
625 		xform.aead.op = options->aead_op;
626 		xform.aead.iv.offset = iv_offset;
627 		xform.aead.key.data = test_vector->aead_key.data;
628 		xform.aead.key.length = test_vector->aead_key.length;
629 		xform.aead.iv.length = test_vector->aead_iv.length;
630 		xform.aead.digest_length = options->digest_sz;
631 		xform.aead.aad_length = options->aead_aad_sz;
632 	} else if (options->cipher_algo != 0 && options->auth_algo != 0) {
633 		/* Setup Cipher Parameters */
634 		xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
635 		xform.next = NULL;
636 		xform.cipher.algo = options->cipher_algo;
637 		xform.cipher.op = options->cipher_op;
638 		xform.cipher.iv.offset = iv_offset;
639 		xform.cipher.iv.length = test_vector->cipher_iv.length;
640 		/* cipher algorithm other than NULL */
641 		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
642 			xform.cipher.key.data = test_vector->cipher_key.data;
643 			xform.cipher.key.length =
644 				test_vector->cipher_key.length;
645 		} else {
646 			xform.cipher.key.data = NULL;
647 			xform.cipher.key.length = 0;
648 		}
649 
650 		/* Setup Auth Parameters */
651 		auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
652 		auth_xform.next = NULL;
653 		auth_xform.auth.algo = options->auth_algo;
654 		auth_xform.auth.op = options->auth_op;
655 		auth_xform.auth.iv.offset = iv_offset +
656 				xform.cipher.iv.length;
657 		/* auth algorithm other than NULL */
658 		if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
659 			auth_xform.auth.digest_length = options->digest_sz;
660 			auth_xform.auth.key.length =
661 						test_vector->auth_key.length;
662 			auth_xform.auth.key.data = test_vector->auth_key.data;
663 			auth_xform.auth.iv.length = test_vector->auth_iv.length;
664 		} else {
665 			auth_xform.auth.digest_length = 0;
666 			auth_xform.auth.key.length = 0;
667 			auth_xform.auth.key.data = NULL;
668 			auth_xform.auth.iv.length = 0;
669 		}
670 
671 		xform.next = &auth_xform;
672 	} else {
673 		return NULL;
674 	}
675 
676 #define CPERF_IPSEC_SRC_IP	0x01010101
677 #define CPERF_IPSEC_DST_IP	0x02020202
678 #define CPERF_IPSEC_SALT	0x0
679 #define CPERF_IPSEC_DEFTTL	64
680 	struct rte_security_ipsec_tunnel_param tunnel = {
681 		.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4,
682 		{.ipv4 = {
683 			.src_ip = { .s_addr = CPERF_IPSEC_SRC_IP},
684 			.dst_ip = { .s_addr = CPERF_IPSEC_DST_IP},
685 			.dscp = 0,
686 			.df = 0,
687 			.ttl = CPERF_IPSEC_DEFTTL,
688 		} },
689 	};
690 	struct rte_security_session_conf sess_conf = {
691 		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
692 		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
693 		{.ipsec = {
694 			.spi = rte_lcore_id(),
695 			/* For the test's sake, the lcore ID is used as the
696 			 * SPI so that each core creates a distinct session.
697 			 */
698 			.salt = CPERF_IPSEC_SALT,
699 			.options = { 0 },
700 			.replay_win_sz = 0,
701 			.direction =
702 				((options->cipher_op ==
703 					RTE_CRYPTO_CIPHER_OP_ENCRYPT) &&
704 				(options->auth_op ==
705 					RTE_CRYPTO_AUTH_OP_GENERATE)) ||
706 				(options->aead_op ==
707 					RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
708 				RTE_SECURITY_IPSEC_SA_DIR_EGRESS :
709 				RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
710 			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
711 			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
712 			.tunnel = tunnel,
713 		} },
714 		.userdata = NULL,
715 		.crypto_xform = &xform
716 	};
717 
718 	struct rte_security_ctx *ctx = (struct rte_security_ctx *)
719 				rte_cryptodev_get_sec_ctx(dev_id);
720 
721 	/* Create security session */
722 	return (void *)rte_security_session_create(ctx,
723 				&sess_conf, sess_mp, priv_mp);
724 }
725 
726 static struct rte_cryptodev_sym_session *
727 cperf_create_session(struct rte_mempool *sess_mp,
728 	struct rte_mempool *priv_mp,
729 	uint8_t dev_id,
730 	const struct cperf_options *options,
731 	const struct cperf_test_vector *test_vector,
732 	uint16_t iv_offset)
733 {
734 	struct rte_crypto_sym_xform cipher_xform;
735 	struct rte_crypto_sym_xform auth_xform;
736 	struct rte_crypto_sym_xform aead_xform;
737 	struct rte_cryptodev_sym_session *sess = NULL;
738 	struct rte_crypto_asym_xform xform = {0};
739 	int rc;
740 
741 	if (options->op_type == CPERF_ASYM_MODEX) {
742 		xform.next = NULL;
743 		xform.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX;
744 		xform.modex.modulus.data = perf_mod_p;
745 		xform.modex.modulus.length = sizeof(perf_mod_p);
746 		xform.modex.exponent.data = perf_mod_e;
747 		xform.modex.exponent.length = sizeof(perf_mod_e);
748 
749 		sess = (void *)rte_cryptodev_asym_session_create(sess_mp);
750 		if (sess == NULL)
751 			return NULL;
752 		rc = rte_cryptodev_asym_session_init(dev_id, (void *)sess,
753 						     &xform, priv_mp);
754 		if (rc < 0) {
755 			if (sess != NULL) {
756 				rte_cryptodev_asym_session_clear(dev_id,
757 								 (void *)sess);
758 				rte_cryptodev_asym_session_free((void *)sess);
759 			}
760 			return NULL;
761 		}
762 		return sess;
763 	}
764 #ifdef RTE_LIB_SECURITY
765 	/*
766 	 * security only
767 	 */
768 	if (options->op_type == CPERF_PDCP) {
769 		/* Setup Cipher Parameters */
770 		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
771 		cipher_xform.next = NULL;
772 		cipher_xform.cipher.algo = options->cipher_algo;
773 		cipher_xform.cipher.op = options->cipher_op;
774 		cipher_xform.cipher.iv.offset = iv_offset;
775 		cipher_xform.cipher.iv.length = 4;
776 
777 		/* cipher algorithm other than NULL */
778 		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
779 			cipher_xform.cipher.key.data = test_vector->cipher_key.data;
780 			cipher_xform.cipher.key.length = test_vector->cipher_key.length;
781 		} else {
782 			cipher_xform.cipher.key.data = NULL;
783 			cipher_xform.cipher.key.length = 0;
784 		}
785 
786 		/* Setup Auth Parameters */
787 		if (options->auth_algo != 0) {
788 			auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
789 			auth_xform.next = NULL;
790 			auth_xform.auth.algo = options->auth_algo;
791 			auth_xform.auth.op = options->auth_op;
792 			auth_xform.auth.iv.offset = iv_offset +
793 				cipher_xform.cipher.iv.length;
794 
795 			/* auth algorithm other than NULL */
796 			if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
797 				auth_xform.auth.digest_length = options->digest_sz;
798 				auth_xform.auth.key.length = test_vector->auth_key.length;
799 				auth_xform.auth.key.data = test_vector->auth_key.data;
800 				auth_xform.auth.iv.length = test_vector->auth_iv.length;
801 			} else {
802 				auth_xform.auth.digest_length = 0;
803 				auth_xform.auth.key.length = 0;
804 				auth_xform.auth.key.data = NULL;
805 				auth_xform.auth.iv.length = 0;
806 			}
807 
808 			cipher_xform.next = &auth_xform;
809 		} else {
810 			cipher_xform.next = NULL;
811 		}
812 
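		/* With the session-level HFN disabled, hfn_ovrd tells the
		 * PMD to take the HFN per packet from the op's IV area
		 * (see cperf_set_ops_security).
		 */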
813 		struct rte_security_session_conf sess_conf = {
814 			.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
815 			.protocol = RTE_SECURITY_PROTOCOL_PDCP,
816 			{.pdcp = {
817 				.bearer = 0x16,
818 				.domain = options->pdcp_domain,
819 				.pkt_dir = 0,
820 				.sn_size = options->pdcp_sn_sz,
821 				.hfn = options->pdcp_ses_hfn_en ?
822 					PDCP_DEFAULT_HFN : 0,
823 				.hfn_threshold = 0x70C0A,
824 				.hfn_ovrd = !(options->pdcp_ses_hfn_en),
825 			} },
826 			.crypto_xform = &cipher_xform
827 		};
828 
829 		struct rte_security_ctx *ctx = (struct rte_security_ctx *)
830 					rte_cryptodev_get_sec_ctx(dev_id);
831 
832 		/* Create security session */
833 		return (void *)rte_security_session_create(ctx,
834 					&sess_conf, sess_mp, priv_mp);
835 	}
836 
837 	if (options->op_type == CPERF_IPSEC) {
838 		return create_ipsec_session(sess_mp, priv_mp, dev_id,
839 				options, test_vector, iv_offset);
840 	}
841 
842 	if (options->op_type == CPERF_DOCSIS) {
843 		enum rte_security_docsis_direction direction;
844 
845 		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
846 		cipher_xform.next = NULL;
847 		cipher_xform.cipher.algo = options->cipher_algo;
848 		cipher_xform.cipher.op = options->cipher_op;
849 		cipher_xform.cipher.iv.offset = iv_offset;
850 		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
851 			cipher_xform.cipher.key.data =
852 				test_vector->cipher_key.data;
853 			cipher_xform.cipher.key.length =
854 				test_vector->cipher_key.length;
855 			cipher_xform.cipher.iv.length =
856 				test_vector->cipher_iv.length;
857 		} else {
858 			cipher_xform.cipher.key.data = NULL;
859 			cipher_xform.cipher.key.length = 0;
860 			cipher_xform.cipher.iv.length = 0;
861 		}
863 
864 		if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
865 			direction = RTE_SECURITY_DOCSIS_DOWNLINK;
866 		else
867 			direction = RTE_SECURITY_DOCSIS_UPLINK;
868 
869 		struct rte_security_session_conf sess_conf = {
870 			.action_type =
871 				RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
872 			.protocol = RTE_SECURITY_PROTOCOL_DOCSIS,
873 			{.docsis = {
874 				.direction = direction,
875 			} },
876 			.crypto_xform = &cipher_xform
877 		};
878 		struct rte_security_ctx *ctx = (struct rte_security_ctx *)
879 					rte_cryptodev_get_sec_ctx(dev_id);
880 
881 		/* Create security session */
882 		return (void *)rte_security_session_create(ctx,
883 					&sess_conf, sess_mp, priv_mp);
884 	}
885 #endif
886 	sess = rte_cryptodev_sym_session_create(sess_mp);
	if (sess == NULL)
		return NULL;
887 	/*
888 	 * cipher only
889 	 */
890 	if (options->op_type == CPERF_CIPHER_ONLY) {
891 		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
892 		cipher_xform.next = NULL;
893 		cipher_xform.cipher.algo = options->cipher_algo;
894 		cipher_xform.cipher.op = options->cipher_op;
895 		cipher_xform.cipher.iv.offset = iv_offset;
896 
897 		/* cipher algorithm other than NULL */
898 		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
899 			cipher_xform.cipher.key.data =
900 					test_vector->cipher_key.data;
901 			cipher_xform.cipher.key.length =
902 					test_vector->cipher_key.length;
903 			cipher_xform.cipher.iv.length =
904 					test_vector->cipher_iv.length;
905 		} else {
906 			cipher_xform.cipher.key.data = NULL;
907 			cipher_xform.cipher.key.length = 0;
908 			cipher_xform.cipher.iv.length = 0;
909 		}
910 		/* create crypto session */
911 		rte_cryptodev_sym_session_init(dev_id, sess, &cipher_xform,
912 				priv_mp);
913 	/*
914 	 *  auth only
915 	 */
916 	} else if (options->op_type == CPERF_AUTH_ONLY) {
917 		auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
918 		auth_xform.next = NULL;
919 		auth_xform.auth.algo = options->auth_algo;
920 		auth_xform.auth.op = options->auth_op;
921 		auth_xform.auth.iv.offset = iv_offset;
922 
923 		/* auth algorithm other than NULL */
924 		if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
925 			auth_xform.auth.digest_length =
926 					options->digest_sz;
927 			auth_xform.auth.key.length =
928 					test_vector->auth_key.length;
929 			auth_xform.auth.key.data = test_vector->auth_key.data;
930 			auth_xform.auth.iv.length =
931 					test_vector->auth_iv.length;
932 		} else {
933 			auth_xform.auth.digest_length = 0;
934 			auth_xform.auth.key.length = 0;
935 			auth_xform.auth.key.data = NULL;
936 			auth_xform.auth.iv.length = 0;
937 		}
938 		/* create crypto session */
939 		rte_cryptodev_sym_session_init(dev_id, sess, &auth_xform,
940 				priv_mp);
941 	/*
942 	 * cipher and auth
943 	 */
944 	} else if (options->op_type == CPERF_CIPHER_THEN_AUTH
945 			|| options->op_type == CPERF_AUTH_THEN_CIPHER) {
946 		/*
947 		 * cipher
948 		 */
949 		cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
950 		cipher_xform.next = NULL;
951 		cipher_xform.cipher.algo = options->cipher_algo;
952 		cipher_xform.cipher.op = options->cipher_op;
953 		cipher_xform.cipher.iv.offset = iv_offset;
954 
955 		/* cipher algorithm other than NULL */
956 		if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
957 			cipher_xform.cipher.key.data =
958 					test_vector->cipher_key.data;
959 			cipher_xform.cipher.key.length =
960 					test_vector->cipher_key.length;
961 			cipher_xform.cipher.iv.length =
962 					test_vector->cipher_iv.length;
963 		} else {
964 			cipher_xform.cipher.key.data = NULL;
965 			cipher_xform.cipher.key.length = 0;
966 			cipher_xform.cipher.iv.length = 0;
967 		}
968 
969 		/*
970 		 * auth
971 		 */
972 		auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
973 		auth_xform.next = NULL;
974 		auth_xform.auth.algo = options->auth_algo;
975 		auth_xform.auth.op = options->auth_op;
976 		auth_xform.auth.iv.offset = iv_offset +
977 			cipher_xform.cipher.iv.length;
978 
979 		/* auth algorithm other than NULL */
980 		if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
981 			auth_xform.auth.digest_length = options->digest_sz;
982 			auth_xform.auth.iv.length = test_vector->auth_iv.length;
983 			auth_xform.auth.key.length =
984 					test_vector->auth_key.length;
985 			auth_xform.auth.key.data =
986 					test_vector->auth_key.data;
987 		} else {
988 			auth_xform.auth.digest_length = 0;
989 			auth_xform.auth.key.length = 0;
990 			auth_xform.auth.key.data = NULL;
991 			auth_xform.auth.iv.length = 0;
992 		}
993 
994 		/* cipher then auth */
995 		if (options->op_type == CPERF_CIPHER_THEN_AUTH) {
996 			cipher_xform.next = &auth_xform;
997 			/* create crypto session */
998 			rte_cryptodev_sym_session_init(dev_id,
999 					sess, &cipher_xform, priv_mp);
1000 		} else { /* auth then cipher */
1001 			auth_xform.next = &cipher_xform;
1002 			/* create crypto session */
1003 			rte_cryptodev_sym_session_init(dev_id,
1004 					sess, &auth_xform, priv_mp);
1005 		}
1006 	} else { /* options->op_type == CPERF_AEAD */
1007 		aead_xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
1008 		aead_xform.next = NULL;
1009 		aead_xform.aead.algo = options->aead_algo;
1010 		aead_xform.aead.op = options->aead_op;
1011 		aead_xform.aead.iv.offset = iv_offset;
1012 
1013 		aead_xform.aead.key.data =
1014 					test_vector->aead_key.data;
1015 		aead_xform.aead.key.length =
1016 					test_vector->aead_key.length;
1017 		aead_xform.aead.iv.length = test_vector->aead_iv.length;
1018 
1019 		aead_xform.aead.digest_length = options->digest_sz;
1020 		aead_xform.aead.aad_length =
1021 					options->aead_aad_sz;
1022 
1023 		/* Create crypto session */
1024 		rte_cryptodev_sym_session_init(dev_id,
1025 					sess, &aead_xform, priv_mp);
1026 	}
1027 
1028 	return sess;
1029 }
1030 
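/*
 * Minimal usage sketch (hypothetical caller; the real users are the
 * cperf_test_* runners):
 *
 *	struct cperf_op_fns fns;
 *
 *	if (cperf_get_op_functions(opts, &fns) != 0)
 *		return -EINVAL;
 *	sess = fns.sess_create(sess_mp, priv_mp, dev_id, opts, vec, iv_off);
 *	fns.populate_ops(ops, src_off, dst_off, nb_ops, sess, opts, vec,
 *			 iv_off, &imix_idx, &tsc_start);
 */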
1031 int
1032 cperf_get_op_functions(const struct cperf_options *options,
1033 		struct cperf_op_fns *op_fns)
1034 {
1035 	memset(op_fns, 0, sizeof(struct cperf_op_fns));
1036 
1037 	op_fns->sess_create = cperf_create_session;
1038 
1039 	switch (options->op_type) {
1040 	case CPERF_AEAD:
1041 		op_fns->populate_ops = cperf_set_ops_aead;
1042 		break;
1043 
1044 	case CPERF_AUTH_THEN_CIPHER:
1045 	case CPERF_CIPHER_THEN_AUTH:
1046 		op_fns->populate_ops = cperf_set_ops_cipher_auth;
1047 		break;
1048 	case CPERF_AUTH_ONLY:
1049 		if (options->auth_algo == RTE_CRYPTO_AUTH_NULL)
1050 			op_fns->populate_ops = cperf_set_ops_null_auth;
1051 		else
1052 			op_fns->populate_ops = cperf_set_ops_auth;
1053 		break;
1054 	case CPERF_CIPHER_ONLY:
1055 		if (options->cipher_algo == RTE_CRYPTO_CIPHER_NULL)
1056 			op_fns->populate_ops = cperf_set_ops_null_cipher;
1057 		else
1058 			op_fns->populate_ops = cperf_set_ops_cipher;
1059 		break;
1060 	case CPERF_ASYM_MODEX:
1061 		op_fns->populate_ops = cperf_set_ops_asym;
1062 		break;
1063 #ifdef RTE_LIB_SECURITY
1064 	case CPERF_PDCP:
1065 	case CPERF_IPSEC:
1066 	case CPERF_DOCSIS:
1067 		op_fns->populate_ops = cperf_set_ops_security;
1068 		break;
1069 #endif
1070 	default:
1071 		return -1;
1072 	}
1073 
1074 	return 0;
1075 }
1076