xref: /dpdk/app/test-crypto-perf/cperf_test_verify.c (revision 0fbd75a99fc9d2c8c7618d677d3f50fb9872b80c)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
5  *
6  *   Redistribution and use in source and binary forms, with or without
7  *   modification, are permitted provided that the following conditions
8  *   are met:
9  *
10  *     * Redistributions of source code must retain the above copyright
11  *       notice, this list of conditions and the following disclaimer.
12  *     * Redistributions in binary form must reproduce the above copyright
13  *       notice, this list of conditions and the following disclaimer in
14  *       the documentation and/or other materials provided with the
15  *       distribution.
16  *     * Neither the name of Intel Corporation nor the names of its
17  *       contributors may be used to endorse or promote products derived
18  *       from this software without specific prior written permission.
19  *
20  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <rte_malloc.h>
34 #include <rte_cycles.h>
35 #include <rte_crypto.h>
36 #include <rte_cryptodev.h>
37 
38 #include "cperf_test_verify.h"
39 #include "cperf_ops.h"
40 
/* Per-(device, queue-pair) state for one verify-test run. */
struct cperf_verify_ctx {
	uint8_t dev_id;		/* crypto device the test drives */
	uint16_t qp_id;		/* queue pair used for enqueue/dequeue */
	uint8_t lcore_id;	/* lcore running the test (set by the runner) */

	/* Mbuf pools and pre-built buffers; *_out is only populated when
	 * the test runs out-of-place (options->out_of_place == 1).
	 */
	struct rte_mempool *pkt_mbuf_pool_in;
	struct rte_mempool *pkt_mbuf_pool_out;
	struct rte_mbuf **mbufs_in;
	struct rte_mbuf **mbufs_out;

	/* Pool that crypto ops are bulk-allocated from each burst. */
	struct rte_mempool *crypto_op_pool;

	/* Symmetric session created by op_fns->sess_create(). */
	struct rte_cryptodev_sym_session *sess;

	/* Callback that attaches mbufs/session/IV to a burst of ops. */
	cperf_populate_ops_t populate_ops;

	const struct cperf_options *options;
	const struct cperf_test_vector *test_vector;
};
60 
/* Holds the completion status of a single crypto operation.
 * NOTE(review): not referenced anywhere in this file — possibly a
 * leftover from an earlier revision; confirm before removing.
 */
struct cperf_op_result {
	enum rte_crypto_op_status status;
};
64 
65 static void
66 cperf_verify_test_free(struct cperf_verify_ctx *ctx, uint32_t mbuf_nb)
67 {
68 	uint32_t i;
69 
70 	if (ctx) {
71 		if (ctx->sess)
72 			rte_cryptodev_sym_session_free(ctx->dev_id, ctx->sess);
73 
74 		if (ctx->mbufs_in) {
75 			for (i = 0; i < mbuf_nb; i++)
76 				rte_pktmbuf_free(ctx->mbufs_in[i]);
77 
78 			rte_free(ctx->mbufs_in);
79 		}
80 
81 		if (ctx->mbufs_out) {
82 			for (i = 0; i < mbuf_nb; i++) {
83 				if (ctx->mbufs_out[i] != NULL)
84 					rte_pktmbuf_free(ctx->mbufs_out[i]);
85 			}
86 
87 			rte_free(ctx->mbufs_out);
88 		}
89 
90 		if (ctx->pkt_mbuf_pool_in)
91 			rte_mempool_free(ctx->pkt_mbuf_pool_in);
92 
93 		if (ctx->pkt_mbuf_pool_out)
94 			rte_mempool_free(ctx->pkt_mbuf_pool_out);
95 
96 		if (ctx->crypto_op_pool)
97 			rte_mempool_free(ctx->crypto_op_pool);
98 
99 		rte_free(ctx);
100 	}
101 }
102 
103 static struct rte_mbuf *
104 cperf_mbuf_create(struct rte_mempool *mempool,
105 		uint32_t segments_nb,
106 		const struct cperf_options *options,
107 		const struct cperf_test_vector *test_vector)
108 {
109 	struct rte_mbuf *mbuf;
110 	uint32_t segment_sz = options->max_buffer_size / segments_nb;
111 	uint32_t last_sz = options->max_buffer_size % segments_nb;
112 	uint8_t *mbuf_data;
113 	uint8_t *test_data =
114 			(options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
115 					test_vector->plaintext.data :
116 					test_vector->ciphertext.data;
117 
118 	mbuf = rte_pktmbuf_alloc(mempool);
119 	if (mbuf == NULL)
120 		goto error;
121 
122 	mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
123 	if (mbuf_data == NULL)
124 		goto error;
125 
126 	memcpy(mbuf_data, test_data, segment_sz);
127 	test_data += segment_sz;
128 	segments_nb--;
129 
130 	while (segments_nb) {
131 		struct rte_mbuf *m;
132 
133 		m = rte_pktmbuf_alloc(mempool);
134 		if (m == NULL)
135 			goto error;
136 
137 		rte_pktmbuf_chain(mbuf, m);
138 
139 		mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
140 		if (mbuf_data == NULL)
141 			goto error;
142 
143 		memcpy(mbuf_data, test_data, segment_sz);
144 		test_data += segment_sz;
145 		segments_nb--;
146 	}
147 
148 	if (last_sz) {
149 		mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, last_sz);
150 		if (mbuf_data == NULL)
151 			goto error;
152 
153 		memcpy(mbuf_data, test_data, last_sz);
154 	}
155 
156 	if (options->op_type != CPERF_CIPHER_ONLY) {
157 		mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf,
158 				options->auth_digest_sz);
159 		if (mbuf_data == NULL)
160 			goto error;
161 	}
162 
163 	if (options->op_type == CPERF_AEAD) {
164 		uint8_t *aead = (uint8_t *)rte_pktmbuf_prepend(mbuf,
165 			RTE_ALIGN_CEIL(options->auth_aad_sz, 16));
166 
167 		if (aead == NULL)
168 			goto error;
169 
170 		memcpy(aead, test_vector->aad.data, test_vector->aad.length);
171 	}
172 
173 	return mbuf;
174 error:
175 	if (mbuf != NULL)
176 		rte_pktmbuf_free(mbuf);
177 
178 	return NULL;
179 }
180 
181 void *
182 cperf_verify_test_constructor(uint8_t dev_id, uint16_t qp_id,
183 		const struct cperf_options *options,
184 		const struct cperf_test_vector *test_vector,
185 		const struct cperf_op_fns *op_fns)
186 {
187 	struct cperf_verify_ctx *ctx = NULL;
188 	unsigned int mbuf_idx = 0;
189 	char pool_name[32] = "";
190 
191 	ctx = rte_malloc(NULL, sizeof(struct cperf_verify_ctx), 0);
192 	if (ctx == NULL)
193 		goto err;
194 
195 	ctx->dev_id = dev_id;
196 	ctx->qp_id = qp_id;
197 
198 	ctx->populate_ops = op_fns->populate_ops;
199 	ctx->options = options;
200 	ctx->test_vector = test_vector;
201 
202 	/* IV goes at the end of the cryptop operation */
203 	uint16_t iv_offset = sizeof(struct rte_crypto_op) +
204 		sizeof(struct rte_crypto_sym_op);
205 
206 	ctx->sess = op_fns->sess_create(dev_id, options, test_vector, iv_offset);
207 	if (ctx->sess == NULL)
208 		goto err;
209 
210 	snprintf(pool_name, sizeof(pool_name), "cperf_pool_in_cdev_%d",
211 			dev_id);
212 
213 	ctx->pkt_mbuf_pool_in = rte_pktmbuf_pool_create(pool_name,
214 			options->pool_sz * options->segments_nb, 0, 0,
215 			RTE_PKTMBUF_HEADROOM +
216 			RTE_CACHE_LINE_ROUNDUP(
217 				(options->max_buffer_size / options->segments_nb) +
218 				(options->max_buffer_size % options->segments_nb) +
219 					options->auth_digest_sz),
220 			rte_socket_id());
221 
222 	if (ctx->pkt_mbuf_pool_in == NULL)
223 		goto err;
224 
225 	/* Generate mbufs_in with plaintext populated for test */
226 	ctx->mbufs_in = rte_malloc(NULL,
227 			(sizeof(struct rte_mbuf *) * ctx->options->pool_sz), 0);
228 
229 	for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
230 		ctx->mbufs_in[mbuf_idx] = cperf_mbuf_create(
231 				ctx->pkt_mbuf_pool_in, options->segments_nb,
232 				options, test_vector);
233 		if (ctx->mbufs_in[mbuf_idx] == NULL)
234 			goto err;
235 	}
236 
237 	if (options->out_of_place == 1)	{
238 
239 		snprintf(pool_name, sizeof(pool_name), "cperf_pool_out_cdev_%d",
240 				dev_id);
241 
242 		ctx->pkt_mbuf_pool_out = rte_pktmbuf_pool_create(
243 				pool_name, options->pool_sz, 0, 0,
244 				RTE_PKTMBUF_HEADROOM +
245 				RTE_CACHE_LINE_ROUNDUP(
246 					options->max_buffer_size +
247 					options->auth_digest_sz),
248 				rte_socket_id());
249 
250 		if (ctx->pkt_mbuf_pool_out == NULL)
251 			goto err;
252 	}
253 
254 	ctx->mbufs_out = rte_malloc(NULL,
255 			(sizeof(struct rte_mbuf *) *
256 			ctx->options->pool_sz), 0);
257 
258 	for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
259 		if (options->out_of_place == 1)	{
260 			ctx->mbufs_out[mbuf_idx] = cperf_mbuf_create(
261 					ctx->pkt_mbuf_pool_out, 1,
262 					options, test_vector);
263 			if (ctx->mbufs_out[mbuf_idx] == NULL)
264 				goto err;
265 		} else {
266 			ctx->mbufs_out[mbuf_idx] = NULL;
267 		}
268 	}
269 
270 	snprintf(pool_name, sizeof(pool_name), "cperf_op_pool_cdev_%d",
271 			dev_id);
272 
273 	uint16_t priv_size = test_vector->iv.length;
274 	ctx->crypto_op_pool = rte_crypto_op_pool_create(pool_name,
275 			RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz,
276 			512, priv_size, rte_socket_id());
277 	if (ctx->crypto_op_pool == NULL)
278 		goto err;
279 
280 	return ctx;
281 err:
282 	cperf_verify_test_free(ctx, mbuf_idx);
283 
284 	return NULL;
285 }
286 
287 static int
288 cperf_verify_op(struct rte_crypto_op *op,
289 		const struct cperf_options *options,
290 		const struct cperf_test_vector *vector)
291 {
292 	const struct rte_mbuf *m;
293 	uint32_t len;
294 	uint16_t nb_segs;
295 	uint8_t *data;
296 	uint32_t cipher_offset, auth_offset;
297 	uint8_t	cipher, auth;
298 	int res = 0;
299 
300 	if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
301 		return 1;
302 
303 	if (op->sym->m_dst)
304 		m = op->sym->m_dst;
305 	else
306 		m = op->sym->m_src;
307 	nb_segs = m->nb_segs;
308 	len = 0;
309 	while (m && nb_segs != 0) {
310 		len += m->data_len;
311 		m = m->next;
312 		nb_segs--;
313 	}
314 
315 	data = rte_malloc(NULL, len, 0);
316 	if (data == NULL)
317 		return 1;
318 
319 	if (op->sym->m_dst)
320 		m = op->sym->m_dst;
321 	else
322 		m = op->sym->m_src;
323 	nb_segs = m->nb_segs;
324 	len = 0;
325 	while (m && nb_segs != 0) {
326 		memcpy(data + len, rte_pktmbuf_mtod(m, uint8_t *),
327 				m->data_len);
328 		len += m->data_len;
329 		m = m->next;
330 		nb_segs--;
331 	}
332 
333 	switch (options->op_type) {
334 	case CPERF_CIPHER_ONLY:
335 		cipher = 1;
336 		cipher_offset = 0;
337 		auth = 0;
338 		auth_offset = 0;
339 		break;
340 	case CPERF_CIPHER_THEN_AUTH:
341 		cipher = 1;
342 		cipher_offset = 0;
343 		auth = 1;
344 		auth_offset = options->test_buffer_size;
345 		break;
346 	case CPERF_AUTH_ONLY:
347 		cipher = 0;
348 		cipher_offset = 0;
349 		auth = 1;
350 		auth_offset = options->test_buffer_size;
351 		break;
352 	case CPERF_AUTH_THEN_CIPHER:
353 		cipher = 1;
354 		cipher_offset = 0;
355 		auth = 1;
356 		auth_offset = options->test_buffer_size;
357 		break;
358 	case CPERF_AEAD:
359 		cipher = 1;
360 		cipher_offset = vector->aad.length;
361 		auth = 1;
362 		auth_offset = vector->aad.length + options->test_buffer_size;
363 		break;
364 	}
365 
366 	if (cipher == 1) {
367 		if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
368 			res += memcmp(data + cipher_offset,
369 					vector->ciphertext.data,
370 					options->test_buffer_size);
371 		else
372 			res += memcmp(data + cipher_offset,
373 					vector->plaintext.data,
374 					options->test_buffer_size);
375 	}
376 
377 	if (auth == 1) {
378 		if (options->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE)
379 			res += memcmp(data + auth_offset,
380 					vector->digest.data,
381 					options->auth_digest_sz);
382 	}
383 
384 	return !!res;
385 }
386 
/**
 * Worker entry point: enqueue total_ops operations in bursts, dequeue
 * and verify each completed op, then print per-lcore statistics.
 *
 * @param test_ctx  Context returned by cperf_verify_test_constructor().
 * @return 0 on completion, -1 if crypto-op allocation fails.
 */
int
cperf_verify_test_runner(void *test_ctx)
{
	struct cperf_verify_ctx *ctx = test_ctx;

	uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0;
	uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
	uint64_t ops_failed = 0;

	/* Shared across all worker lcores so the header prints once.
	 * NOTE(review): plain static int, not atomic — concurrent workers
	 * could race and print the header more than once; confirm whether
	 * that is acceptable here.
	 */
	static int only_once;

	uint64_t i, m_idx = 0;
	uint16_t ops_unused = 0;

	/* VLAs sized by the configured maximum burst. */
	struct rte_crypto_op *ops[ctx->options->max_burst_size];
	struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];

	uint32_t lcore = rte_lcore_id();

#ifdef CPERF_LINEARIZATION_ENABLE
	struct rte_cryptodev_info dev_info;
	int linearize = 0;

	/* Check if source mbufs require coalescing */
	if (ctx->options->segments_nb > 1) {
		rte_cryptodev_info_get(ctx->dev_id, &dev_info);
		if ((dev_info.feature_flags &
				RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0)
			linearize = 1;
	}
#endif /* CPERF_LINEARIZATION_ENABLE */

	ctx->lcore_id = lcore;

	if (!ctx->options->csv)
		printf("\n# Running verify test on device: %u, lcore: %u\n",
			ctx->dev_id, lcore);

	/* IV lives in the op's private area, right after the sym op. */
	uint16_t iv_offset = sizeof(struct rte_crypto_op) +
		sizeof(struct rte_crypto_sym_op);

	while (ops_enqd_total < ctx->options->total_ops) {

		/* Last burst may be smaller than max_burst_size. */
		uint16_t burst_size = ((ops_enqd_total + ctx->options->max_burst_size)
				<= ctx->options->total_ops) ?
						ctx->options->max_burst_size :
						ctx->options->total_ops -
						ops_enqd_total;

		/* Ops left over from a partial enqueue are reused as-is. */
		uint16_t ops_needed = burst_size - ops_unused;

		/* Allocate crypto ops from pool */
		if (ops_needed != rte_crypto_op_bulk_alloc(
				ctx->crypto_op_pool,
				RTE_CRYPTO_OP_TYPE_SYMMETRIC,
				ops, ops_needed))
			return -1;

		/* Setup crypto op, attach mbuf etc */
		(ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
				&ctx->mbufs_out[m_idx],
				ops_needed, ctx->sess, ctx->options,
				ctx->test_vector, iv_offset);

#ifdef CPERF_LINEARIZATION_ENABLE
		if (linearize) {
			/* PMD doesn't support scatter-gather and source buffer
			 * is segmented.
			 * We need to linearize it before enqueuing.
			 */
			for (i = 0; i < burst_size; i++)
				rte_pktmbuf_linearize(ops[i]->sym->m_src);
		}
#endif /* CPERF_LINEARIZATION_ENABLE */

		/* Enqueue burst of ops on crypto device */
		ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
				ops, burst_size);
		if (ops_enqd < burst_size)
			ops_enqd_failed++;

		/**
		 * Calculate number of ops not enqueued (mainly for hw
		 * accelerators whose ingress queue can fill up).
		 */
		ops_unused = burst_size - ops_enqd;
		ops_enqd_total += ops_enqd;


		/* Dequeue processed burst of ops from crypto device */
		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
				ops_processed, ctx->options->max_burst_size);

		/* Advance through the pre-built mbuf ring, wrapping before
		 * a burst could run past pool_sz.
		 */
		m_idx += ops_needed;
		if (m_idx + ctx->options->max_burst_size > ctx->options->pool_sz)
			m_idx = 0;

		if (ops_deqd == 0) {
			/**
			 * Count dequeue polls which didn't return any
			 * processed operations. This statistic is mainly
			 * relevant to hw accelerators.
			 */
			ops_deqd_failed++;
			continue;
		}

		for (i = 0; i < ops_deqd; i++) {
			if (cperf_verify_op(ops_processed[i], ctx->options,
						ctx->test_vector))
				ops_failed++;
			/* free crypto ops so they can be reused. We don't free
			 * the mbufs here as we don't want to reuse them as
			 * the crypto operation will change the data and cause
			 * failures.
			 */
			rte_crypto_op_free(ops_processed[i]);
		}
		ops_deqd_total += ops_deqd;
	}

	/* Dequeue any operations still in the crypto device */

	while (ops_deqd_total < ctx->options->total_ops) {
		/* Sending 0 length burst to flush sw crypto device */
		rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);

		/* dequeue burst */
		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
				ops_processed, ctx->options->max_burst_size);
		if (ops_deqd == 0) {
			ops_deqd_failed++;
			continue;
		}

		for (i = 0; i < ops_deqd; i++) {
			if (cperf_verify_op(ops_processed[i], ctx->options,
						ctx->test_vector))
				ops_failed++;
			/* free crypto ops so they can be reused. We don't free
			 * the mbufs here as we don't want to reuse them as
			 * the crypto operation will change the data and cause
			 * failures.
			 */
			rte_crypto_op_free(ops_processed[i]);
		}
		ops_deqd_total += ops_deqd;
	}

	/* Report: human-readable table or CSV, header printed once. */
	if (!ctx->options->csv) {
		if (!only_once)
			printf("%12s%12s%12s%12s%12s%12s%12s%12s\n\n",
				"lcore id", "Buf Size", "Burst size",
				"Enqueued", "Dequeued", "Failed Enq",
				"Failed Deq", "Failed Ops");
		only_once = 1;

		printf("%12u%12u%12u%12"PRIu64"%12"PRIu64"%12"PRIu64
				"%12"PRIu64"%12"PRIu64"\n",
				ctx->lcore_id,
				ctx->options->max_buffer_size,
				ctx->options->max_burst_size,
				ops_enqd_total,
				ops_deqd_total,
				ops_enqd_failed,
				ops_deqd_failed,
				ops_failed);
	} else {
		if (!only_once)
			printf("\n# lcore id, Buffer Size(B), "
				"Burst Size,Enqueued,Dequeued,Failed Enq,"
				"Failed Deq,Failed Ops\n");
		only_once = 1;

		printf("%10u;%10u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";"
				"%"PRIu64"\n",
				ctx->lcore_id,
				ctx->options->max_buffer_size,
				ctx->options->max_burst_size,
				ops_enqd_total,
				ops_deqd_total,
				ops_enqd_failed,
				ops_deqd_failed,
				ops_failed);
	}

	return 0;
}
575 
576 
577 
578 void
579 cperf_verify_test_destructor(void *arg)
580 {
581 	struct cperf_verify_ctx *ctx = arg;
582 
583 	if (ctx == NULL)
584 		return;
585 
586 	cperf_verify_test_free(ctx, ctx->options->pool_sz);
587 }
588