/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <stdlib.h>

#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>

#include "cperf_test_verify.h"
#include "cperf_ops.h"
#include "cperf_test_common.h"

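/*
 * Per-(device, queue pair) state for one verify-test worker. sess_owner
 * records whether this context created the crypto session (and therefore
 * must free it) or merely borrowed one created for another queue pair.
 */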
struct cperf_verify_ctx {
	uint8_t dev_id;
	uint16_t qp_id;
	uint8_t lcore_id;

	struct rte_mempool *pool;

	void *sess;
	uint8_t sess_owner;

	cperf_populate_ops_t populate_ops;

	uint32_t src_buf_offset;
	uint32_t dst_buf_offset;

	const struct cperf_options *options;
	const struct cperf_test_vector *test_vector;
};

struct cperf_op_result {
	enum rte_crypto_op_status status;
};

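/*
 * Release everything owned by the context. The session is freed through
 * the API matching how it was created: asymmetric, rte_security (PDCP,
 * DOCSIS, TLS or IPsec), or plain symmetric.
 */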
static void
cperf_verify_test_free(struct cperf_verify_ctx *ctx)
{
	if (ctx == NULL)
		return;

	if (ctx->sess != NULL && ctx->sess_owner) {
		if (cperf_is_asym_test(ctx->options))
			rte_cryptodev_asym_session_free(ctx->dev_id, ctx->sess);
#ifdef RTE_LIB_SECURITY
		else if (ctx->options->op_type == CPERF_PDCP ||
			 ctx->options->op_type == CPERF_DOCSIS ||
			 ctx->options->op_type == CPERF_TLS ||
			 ctx->options->op_type == CPERF_IPSEC) {
			void *sec_ctx = rte_cryptodev_get_sec_ctx(ctx->dev_id);

			rte_security_session_destroy(sec_ctx, ctx->sess);
		}
#endif
		else
			rte_cryptodev_sym_session_free(ctx->dev_id, ctx->sess);
	}

	rte_mempool_free(ctx->pool);
	rte_free(ctx);
}

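/*
 * Build the per-queue-pair test context. A session may be passed in via
 * *sess and is then shared; when *sess is NULL a new session is created
 * here, handed back through *sess, and this context becomes its owner.
 */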
void *
cperf_verify_test_constructor(struct rte_mempool *sess_mp,
		uint8_t dev_id, uint16_t qp_id,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		const struct cperf_op_fns *op_fns,
		void **sess)
{
	struct cperf_verify_ctx *ctx = NULL;

	ctx = rte_malloc(NULL, sizeof(struct cperf_verify_ctx), 0);
	if (ctx == NULL)
		goto err;

	ctx->dev_id = dev_id;
	ctx->qp_id = qp_id;

	ctx->populate_ops = op_fns->populate_ops;
	ctx->options = options;
	ctx->test_vector = test_vector;

	/* IV goes at the end of the crypto operation */
	uint16_t iv_offset = sizeof(struct rte_crypto_op) +
		sizeof(struct rte_crypto_sym_op);
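	/*
	 * Resulting layout of each op object in the pool (a sketch; the IV
	 * lives in the op's private data area):
	 *
	 *	+---------------+-------------------+------+
	 *	| rte_crypto_op | rte_crypto_sym_op |  IV  |
	 *	+---------------+-------------------+------+
	 *	                                    ^ iv_offset
	 */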

	if (*sess != NULL) {
		ctx->sess = *sess;
		ctx->sess_owner = false;
	} else {
		ctx->sess = op_fns->sess_create(sess_mp, dev_id, options,
				test_vector, iv_offset);
		if (ctx->sess == NULL)
			goto err;
		*sess = ctx->sess;
		ctx->sess_owner = true;
	}

	if (cperf_alloc_common_memory(options, test_vector, dev_id, qp_id, 0,
			&ctx->src_buf_offset, &ctx->dst_buf_offset,
			&ctx->pool) < 0)
		goto err;

	return ctx;
err:
	cperf_verify_test_free(ctx);

	return NULL;
}

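/*
 * Compare one dequeued op against the expected test vector. The mbuf
 * chain (m_dst when set, m_src otherwise) is flattened into a temporary
 * buffer first. Returns 0 when everything matches, 1 otherwise.
 */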
static int
cperf_verify_op(struct rte_crypto_op *op,
		const struct cperf_options *options,
		const struct cperf_test_vector *vector)
{
	const struct rte_mbuf *m;
	uint32_t len;
	uint16_t nb_segs;
	uint8_t *data;
	uint32_t cipher_offset, auth_offset = 0;
	bool cipher = false;
	bool digest_verify = false;
	bool is_encrypt = false;
	int res = 0;

	if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
		return 1;

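	/* First pass over the segment chain: total up the data length. */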
	if (op->sym->m_dst)
		m = op->sym->m_dst;
	else
		m = op->sym->m_src;
	nb_segs = m->nb_segs;
	len = 0;
	while (m && nb_segs != 0) {
		len += m->data_len;
		m = m->next;
		nb_segs--;
	}

	data = rte_malloc(NULL, len, 0);
	if (data == NULL)
		return 1;

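	/* Second pass: copy each segment into the flat buffer. */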
	if (op->sym->m_dst)
		m = op->sym->m_dst;
	else
		m = op->sym->m_src;
	nb_segs = m->nb_segs;
	len = 0;
	while (m && nb_segs != 0) {
		memcpy(data + len, rte_pktmbuf_mtod(m, uint8_t *),
				m->data_len);
		len += m->data_len;
		m = m->next;
		nb_segs--;
	}

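	/*
	 * Decide what to compare: for cipher ops the payload must match the
	 * vector's ciphertext (encrypt) or plaintext (decrypt); whenever a
	 * digest was generated it is checked at auth_offset.
	 */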
	switch (options->op_type) {
	case CPERF_CIPHER_ONLY:
		cipher = true;
		cipher_offset = 0;
		is_encrypt = options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT;
		break;
	case CPERF_AUTH_ONLY:
		cipher_offset = 0;
		if (options->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE) {
			auth_offset = options->test_buffer_size;
			digest_verify = true;
		}
		break;
	case CPERF_CIPHER_THEN_AUTH:
	case CPERF_AUTH_THEN_CIPHER:
		cipher = true;
		cipher_offset = 0;
		if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
			auth_offset = options->test_buffer_size;
			digest_verify = true;
			is_encrypt = true;
		}
		break;
	case CPERF_AEAD:
		cipher = true;
		cipher_offset = 0;
		if (options->aead_op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
			auth_offset = options->test_buffer_size;
			digest_verify = true;
			is_encrypt = true;
		}
		break;
	default:
		res = 1;
		goto out;
	}

	if (cipher) {
		if (is_encrypt)
			res += !!memcmp(data + cipher_offset,
					vector->ciphertext.data,
					options->test_buffer_size);
		else
			res += !!memcmp(data + cipher_offset,
					vector->plaintext.data,
					options->test_buffer_size);
	}

	if (digest_verify)
		res += !!memcmp(data + auth_offset, vector->digest.data,
				options->digest_sz);

out:
	rte_free(data);
	return !!res;
}

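/*
 * A rough usage sketch (the actual wiring lives in the test harness, not
 * in this file): the harness is expected to do something like
 *
 *	void *sess = NULL;
 *	void *ctx = cperf_verify_test_constructor(sess_mp, dev_id, qp_id,
 *			opts, vec, op_fns, &sess);
 *	rte_eal_remote_launch(cperf_verify_test_runner, ctx, lcore_id);
 *	...
 *	cperf_verify_test_destructor(ctx);
 *
 * with sess shared between sibling queue pairs on the same device.
 */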
int
cperf_verify_test_runner(void *test_ctx)
{
	struct cperf_verify_ctx *ctx = test_ctx;

	uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0;
	uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
	uint64_t ops_failed = 0;

	static RTE_ATOMIC(uint16_t) display_once;

	uint64_t i;
	uint16_t ops_unused = 0;
	uint32_t imix_idx = 0;

	struct rte_crypto_op *ops[ctx->options->max_burst_size];
	struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];

	uint32_t lcore = rte_lcore_id();

#ifdef CPERF_LINEARIZATION_ENABLE
	struct rte_cryptodev_info dev_info;
	int linearize = 0;

	/* Check if source mbufs require coalescing */
	if (ctx->options->segment_sz < ctx->options->max_buffer_size) {
		rte_cryptodev_info_get(ctx->dev_id, &dev_info);
		if ((dev_info.feature_flags &
				RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0)
			linearize = 1;
	}
#endif /* CPERF_LINEARIZATION_ENABLE */

	ctx->lcore_id = lcore;

	if (!ctx->options->csv)
		printf("\n# Running verify test on device: %u, lcore: %u\n",
			ctx->dev_id, lcore);

	uint16_t iv_offset = sizeof(struct rte_crypto_op) +
		sizeof(struct rte_crypto_sym_op);

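	/*
	 * Main loop: enqueue bursts until total_ops operations have been
	 * submitted, verifying each completed op as it is dequeued.
	 */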
	while (ops_enqd_total < ctx->options->total_ops) {

		uint16_t burst_size = ((ops_enqd_total + ctx->options->max_burst_size)
				<= ctx->options->total_ops) ?
					ctx->options->max_burst_size :
					ctx->options->total_ops - ops_enqd_total;

		uint16_t ops_needed = burst_size - ops_unused;

		/* Allocate objects containing crypto operations and mbufs */
		if (rte_mempool_get_bulk(ctx->pool, (void **)ops,
					ops_needed) != 0) {
			RTE_LOG(ERR, USER1,
				"Failed to allocate more crypto operations "
				"from the crypto operation pool.\n"
				"Consider increasing the pool size "
				"with --pool-sz\n");
			return -1;
		}

		/* Set up the crypto ops and attach the mbufs */
		(ctx->populate_ops)(ops, ctx->src_buf_offset,
				ctx->dst_buf_offset,
				ops_needed, ctx->sess, ctx->options,
				ctx->test_vector, iv_offset, &imix_idx, NULL);

		/* Populate the mbuf with the test vector, for verification */
		for (i = 0; i < ops_needed; i++)
			cperf_mbuf_set(ops[i]->sym->m_src,
					ctx->options,
					ctx->test_vector);

#ifdef CPERF_LINEARIZATION_ENABLE
		if (linearize) {
			/* PMD doesn't support scatter-gather and the source
			 * buffer is segmented, so linearize it before
			 * enqueuing.
			 */
			for (i = 0; i < burst_size; i++)
				rte_pktmbuf_linearize(ops[i]->sym->m_src);
		}
#endif /* CPERF_LINEARIZATION_ENABLE */

		/*
		 * ops_enqd still holds the enqueue count from the previous
		 * burst. When ops_needed is smaller than that, the ops left
		 * unenqueued last round sit at ops[ops_enqd..] and must be
		 * moved down to ops[ops_needed..] so that the new burst is
		 * contiguous.
		 */
		if (unlikely(ops_enqd > ops_needed)) {
			size_t nb_b_to_mov = ops_unused * sizeof(struct rte_crypto_op *);

			memmove(&ops[ops_needed], &ops[ops_enqd], nb_b_to_mov);
		}

		/* Enqueue burst of ops on crypto device */
		ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
				ops, burst_size);
		if (ops_enqd < burst_size)
			ops_enqd_failed++;

		/**
		 * Calculate number of ops not enqueued (mainly for hw
		 * accelerators whose ingress queue can fill up).
		 */
		ops_unused = burst_size - ops_enqd;
		ops_enqd_total += ops_enqd;

		/* Dequeue processed burst of ops from crypto device */
		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
				ops_processed, ctx->options->max_burst_size);

		if (ops_deqd == 0) {
			/**
			 * Count dequeue polls which didn't return any
			 * processed operations. This statistic is mainly
			 * relevant to hw accelerators.
			 */
			ops_deqd_failed++;
			continue;
		}

		for (i = 0; i < ops_deqd; i++) {
			if (cperf_verify_op(ops_processed[i], ctx->options,
						ctx->test_vector))
				ops_failed++;
		}
		/* Free crypto ops so they can be reused. */
		rte_mempool_put_bulk(ctx->pool,
					(void **)ops_processed, ops_deqd);
		ops_deqd_total += ops_deqd;
	}

	/* Dequeue any operations still in the crypto device */
	while (ops_deqd_total < ctx->options->total_ops) {
		/* Send a zero-length burst to flush sw crypto devices */
		rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);

		/* Dequeue burst */
		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
				ops_processed, ctx->options->max_burst_size);
		if (ops_deqd == 0) {
			ops_deqd_failed++;
			continue;
		}

		for (i = 0; i < ops_deqd; i++) {
			if (cperf_verify_op(ops_processed[i], ctx->options,
						ctx->test_vector))
				ops_failed++;
		}
		/* Free crypto ops so they can be reused. */
		rte_mempool_put_bulk(ctx->pool,
					(void **)ops_processed, ops_deqd);
		ops_deqd_total += ops_deqd;
	}

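	/*
	 * The first lcore to finish prints the header row; every lcore then
	 * prints its own totals.
	 */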
	uint16_t exp = 0;
	if (!ctx->options->csv) {
		if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
				rte_memory_order_relaxed, rte_memory_order_relaxed))
			printf("%12s%12s%12s%12s%12s%12s%12s%12s\n\n",
				"lcore id", "Buf Size", "Burst size",
				"Enqueued", "Dequeued", "Failed Enq",
				"Failed Deq", "Failed Ops");

		printf("%12u%12u%12u%12"PRIu64"%12"PRIu64"%12"PRIu64
				"%12"PRIu64"%12"PRIu64"\n",
				ctx->lcore_id,
				ctx->options->max_buffer_size,
				ctx->options->max_burst_size,
				ops_enqd_total,
				ops_deqd_total,
				ops_enqd_failed,
				ops_deqd_failed,
				ops_failed);
	} else {
		if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
				rte_memory_order_relaxed, rte_memory_order_relaxed))
			printf("\n# lcore id, Buffer Size(B), "
				"Burst Size,Enqueued,Dequeued,Failed Enq,"
				"Failed Deq,Failed Ops\n");

		printf("%10u,%10u,%u,%"PRIu64",%"PRIu64",%"PRIu64",%"PRIu64","
				"%"PRIu64"\n",
				ctx->lcore_id,
				ctx->options->max_buffer_size,
				ctx->options->max_burst_size,
				ops_enqd_total,
				ops_deqd_total,
				ops_enqd_failed,
				ops_deqd_failed,
				ops_failed);
	}

	return 0;
}

void
cperf_verify_test_destructor(void *arg)
{
	struct cperf_verify_ctx *ctx = arg;

	if (ctx == NULL)
		return;

	cperf_verify_test_free(ctx);
}
433