xref: /dpdk/app/test-crypto-perf/cperf_test_verify.c (revision e9fd1ebf981f361844aea9ec94e17f4bda5e1479)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <stdlib.h>

#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>

#include "cperf_test_verify.h"
#include "cperf_ops.h"
#include "cperf_test_common.h"

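/*
 * The verify test mode enqueues operations built from a known test vector and
 * compares the device output against the expected ciphertext/plaintext and
 * digest. An illustrative invocation only (the vdev/device name and the
 * algorithm options depend on the build and on the crypto PMDs available)
 * might look like:
 *
 *   dpdk-test-crypto-perf -l 0,1 --vdev crypto_aesni_mb -- \
 *       --ptest verify --devtype crypto_aesni_mb \
 *       --optype cipher-then-auth \
 *       --cipher-algo aes-cbc --cipher-op encrypt \
 *       --auth-algo sha1-hmac --auth-op generate \
 *       --total-ops 10000 --burst-sz 32 --buffer-sz 64
 */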
struct cperf_verify_ctx {
	uint8_t dev_id;
	uint16_t qp_id;
	uint8_t lcore_id;

	struct rte_mempool *pool;

	void *sess;

	cperf_populate_ops_t populate_ops;

	uint32_t src_buf_offset;
	uint32_t dst_buf_offset;

	const struct cperf_options *options;
	const struct cperf_test_vector *test_vector;
};

struct cperf_op_result {
	enum rte_crypto_op_status status;
};

static void
cperf_verify_test_free(struct cperf_verify_ctx *ctx)
{
	if (ctx == NULL)
		return;

	if (ctx->sess != NULL) {
		if (ctx->options->op_type == CPERF_ASYM_MODEX)
			rte_cryptodev_asym_session_free(ctx->dev_id, ctx->sess);
#ifdef RTE_LIB_SECURITY
		else if (ctx->options->op_type == CPERF_PDCP ||
			 ctx->options->op_type == CPERF_DOCSIS ||
			 ctx->options->op_type == CPERF_TLS ||
			 ctx->options->op_type == CPERF_IPSEC) {
			void *sec_ctx = rte_cryptodev_get_sec_ctx(ctx->dev_id);

			rte_security_session_destroy(sec_ctx, ctx->sess);
		}
#endif
		else
			rte_cryptodev_sym_session_free(ctx->dev_id, ctx->sess);
	}

	rte_mempool_free(ctx->pool);
	rte_free(ctx);
}

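/*
 * Create the per-queue-pair verify test context: store the device/queue
 * identifiers and callbacks, create the session through the op-type specific
 * sess_create() callback and reserve the shared crypto-op/mbuf pool.
 * Returns NULL (after cleaning up) if any step fails.
 */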
void *
cperf_verify_test_constructor(struct rte_mempool *sess_mp,
		uint8_t dev_id, uint16_t qp_id,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		const struct cperf_op_fns *op_fns)
{
	struct cperf_verify_ctx *ctx = NULL;

	ctx = rte_malloc(NULL, sizeof(struct cperf_verify_ctx), 0);
	if (ctx == NULL)
		goto err;

	ctx->dev_id = dev_id;
	ctx->qp_id = qp_id;

	ctx->populate_ops = op_fns->populate_ops;
	ctx->options = options;
	ctx->test_vector = test_vector;

	/* IV goes at the end of the crypto operation */
	uint16_t iv_offset = sizeof(struct rte_crypto_op) +
		sizeof(struct rte_crypto_sym_op);

	ctx->sess = op_fns->sess_create(sess_mp, dev_id, options,
			test_vector, iv_offset);
	if (ctx->sess == NULL)
		goto err;

	if (cperf_alloc_common_memory(options, test_vector, dev_id, qp_id, 0,
			&ctx->src_buf_offset, &ctx->dst_buf_offset,
			&ctx->pool) < 0)
		goto err;

	return ctx;
err:
	cperf_verify_test_free(ctx);

	return NULL;
}

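/*
 * Check a completed operation against the expected test vector: flatten the
 * (possibly segmented) output mbuf into a contiguous scratch buffer and
 * compare the payload, and the digest when one was generated, with the
 * reference data. Returns 0 on match and nonzero on mismatch or if the
 * operation completed with an error status.
 */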
static int
cperf_verify_op(struct rte_crypto_op *op,
		const struct cperf_options *options,
		const struct cperf_test_vector *vector)
{
	const struct rte_mbuf *m;
	uint32_t len;
	uint16_t nb_segs;
	uint8_t *data;
	uint32_t cipher_offset, auth_offset = 0;
	bool cipher = false;
	bool digest_verify = false;
	bool is_encrypt = false;
	int res = 0;

	if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
		return 1;

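	/* First pass: total payload length of the output mbuf chain
	 * (m_dst for out-of-place operations, m_src otherwise).
	 */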
	if (op->sym->m_dst)
		m = op->sym->m_dst;
	else
		m = op->sym->m_src;
	nb_segs = m->nb_segs;
	len = 0;
	while (m && nb_segs != 0) {
		len += m->data_len;
		m = m->next;
		nb_segs--;
	}

	data = rte_malloc(NULL, len, 0);
	if (data == NULL)
		return 1;

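	/* Second pass: copy every segment into the scratch buffer so the
	 * whole payload can be compared with plain memcmp() calls.
	 */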
	if (op->sym->m_dst)
		m = op->sym->m_dst;
	else
		m = op->sym->m_src;
	nb_segs = m->nb_segs;
	len = 0;
	while (m && nb_segs != 0) {
		memcpy(data + len, rte_pktmbuf_mtod(m, uint8_t *),
				m->data_len);
		len += m->data_len;
		m = m->next;
		nb_segs--;
	}

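	/* Pick the regions to compare based on the operation type: the
	 * ciphertext (or plaintext for decryption) starts at offset 0 and,
	 * when a digest was generated, it sits right after the test buffer.
	 */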
	switch (options->op_type) {
	case CPERF_CIPHER_ONLY:
		cipher = true;
		cipher_offset = 0;
		is_encrypt = options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT;
		break;
	case CPERF_AUTH_ONLY:
		cipher_offset = 0;
		if (options->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE) {
			auth_offset = options->test_buffer_size;
			digest_verify = true;
		}
		break;
	case CPERF_CIPHER_THEN_AUTH:
	case CPERF_AUTH_THEN_CIPHER:
		cipher = true;
		cipher_offset = 0;
		if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
			auth_offset = options->test_buffer_size;
			digest_verify = true;
			is_encrypt = true;
		}
		break;
	case CPERF_AEAD:
		cipher = true;
		cipher_offset = 0;
		if (options->aead_op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
			auth_offset = options->test_buffer_size;
			digest_verify = true;
			is_encrypt = true;
		}
		break;
	default:
		res = 1;
		goto out;
	}

	if (cipher) {
		if (is_encrypt)
			res += !!memcmp(data + cipher_offset,
					vector->ciphertext.data,
					options->test_buffer_size);
		else
			res += !!memcmp(data + cipher_offset,
					vector->plaintext.data,
					options->test_buffer_size);
	}

	if (digest_verify)
		res += !!memcmp(data + auth_offset, vector->digest.data, options->digest_sz);

out:
	rte_free(data);
	return !!res;
}

int
cperf_verify_test_runner(void *test_ctx)
{
	struct cperf_verify_ctx *ctx = test_ctx;

	uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0;
	uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
	uint64_t ops_failed = 0;

	static uint16_t display_once;

	uint64_t i;
	uint16_t ops_unused = 0;
	uint32_t imix_idx = 0;

	struct rte_crypto_op *ops[ctx->options->max_burst_size];
	struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];

	uint32_t lcore = rte_lcore_id();

#ifdef CPERF_LINEARIZATION_ENABLE
	struct rte_cryptodev_info dev_info;
	int linearize = 0;

	/* Check if source mbufs require coalescing */
	if (ctx->options->segment_sz < ctx->options->max_buffer_size) {
		rte_cryptodev_info_get(ctx->dev_id, &dev_info);
		if ((dev_info.feature_flags &
				RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0)
			linearize = 1;
	}
#endif /* CPERF_LINEARIZATION_ENABLE */

	ctx->lcore_id = lcore;

	if (!ctx->options->csv)
		printf("\n# Running verify test on device: %u, lcore: %u\n",
			ctx->dev_id, lcore);

	uint16_t iv_offset = sizeof(struct rte_crypto_op) +
		sizeof(struct rte_crypto_sym_op);

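	/* Main loop: keep enqueuing bursts until the requested total number
	 * of operations has been submitted, verifying every operation that
	 * is dequeued along the way.
	 */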
	while (ops_enqd_total < ctx->options->total_ops) {

		uint16_t burst_size = ((ops_enqd_total + ctx->options->max_burst_size)
				<= ctx->options->total_ops) ?
						ctx->options->max_burst_size :
						ctx->options->total_ops -
						ops_enqd_total;

		uint16_t ops_needed = burst_size - ops_unused;

		/* Allocate objects containing crypto operations and mbufs */
		if (rte_mempool_get_bulk(ctx->pool, (void **)ops,
					ops_needed) != 0) {
			RTE_LOG(ERR, USER1,
				"Failed to allocate more crypto operations "
				"from the crypto operation pool.\n"
				"Consider increasing the pool size "
				"with --pool-sz\n");
			return -1;
		}

		/* Setup crypto op, attach mbuf etc */
		(ctx->populate_ops)(ops, ctx->src_buf_offset,
				ctx->dst_buf_offset,
				ops_needed, ctx->sess, ctx->options,
				ctx->test_vector, iv_offset, &imix_idx, NULL);

		/* Populate the mbuf with the test vector, for verification */
		for (i = 0; i < ops_needed; i++)
			cperf_mbuf_set(ops[i]->sym->m_src,
					ctx->options,
					ctx->test_vector);

#ifdef CPERF_LINEARIZATION_ENABLE
		if (linearize) {
			/* PMD doesn't support scatter-gather and source buffer
			 * is segmented.
			 * We need to linearize it before enqueuing.
			 */
			for (i = 0; i < burst_size; i++)
				rte_pktmbuf_linearize(ops[i]->sym->m_src);
		}
#endif /* CPERF_LINEARIZATION_ENABLE */

		/**
		 * When ops_needed is smaller than ops_enqd, the unused ops
		 * left over from the previous burst need to be moved to the
		 * front so they can be reused in the next round.
		 */
		if (unlikely(ops_enqd > ops_needed)) {
			size_t nb_b_to_mov = ops_unused * sizeof(struct rte_crypto_op *);

			memmove(&ops[ops_needed], &ops[ops_enqd], nb_b_to_mov);
		}

		/* Enqueue burst of ops on crypto device */
		ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
				ops, burst_size);
		if (ops_enqd < burst_size)
			ops_enqd_failed++;

		/**
		 * Calculate number of ops not enqueued (mainly for hw
		 * accelerators whose ingress queue can fill up).
		 */
		ops_unused = burst_size - ops_enqd;
		ops_enqd_total += ops_enqd;


		/* Dequeue processed burst of ops from crypto device */
		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
				ops_processed, ctx->options->max_burst_size);

		if (ops_deqd == 0) {
			/**
			 * Count dequeue polls which didn't return any
			 * processed operations. This statistic is mainly
			 * relevant to hw accelerators.
			 */
			ops_deqd_failed++;
			continue;
		}

		for (i = 0; i < ops_deqd; i++) {
			if (cperf_verify_op(ops_processed[i], ctx->options,
						ctx->test_vector))
				ops_failed++;
		}
		/* Free crypto ops so they can be reused. */
		rte_mempool_put_bulk(ctx->pool,
					(void **)ops_processed, ops_deqd);
		ops_deqd_total += ops_deqd;
	}

	/* Dequeue any operations still in the crypto device */

	while (ops_deqd_total < ctx->options->total_ops) {
		/* Send a zero-length burst to flush the software crypto device */
		rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);

		/* dequeue burst */
		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
				ops_processed, ctx->options->max_burst_size);
		if (ops_deqd == 0) {
			ops_deqd_failed++;
			continue;
		}

		for (i = 0; i < ops_deqd; i++) {
			if (cperf_verify_op(ops_processed[i], ctx->options,
						ctx->test_vector))
				ops_failed++;
		}
		/* Free crypto ops so they can be reused. */
		rte_mempool_put_bulk(ctx->pool,
					(void **)ops_processed, ops_deqd);
		ops_deqd_total += ops_deqd;
	}

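	/* Report per-lcore statistics; the header line (or CSV column names)
	 * is printed only once across all workers, guarded by display_once.
	 */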
	uint16_t exp = 0;
	if (!ctx->options->csv) {
		if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
				__ATOMIC_RELAXED, __ATOMIC_RELAXED))
			printf("%12s%12s%12s%12s%12s%12s%12s%12s\n\n",
				"lcore id", "Buf Size", "Burst size",
				"Enqueued", "Dequeued", "Failed Enq",
				"Failed Deq", "Failed Ops");

		printf("%12u%12u%12u%12"PRIu64"%12"PRIu64"%12"PRIu64
				"%12"PRIu64"%12"PRIu64"\n",
				ctx->lcore_id,
				ctx->options->max_buffer_size,
				ctx->options->max_burst_size,
				ops_enqd_total,
				ops_deqd_total,
				ops_enqd_failed,
				ops_deqd_failed,
				ops_failed);
	} else {
		if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
				__ATOMIC_RELAXED, __ATOMIC_RELAXED))
			printf("\n# lcore id, Buffer Size(B), "
				"Burst Size,Enqueued,Dequeued,Failed Enq,"
				"Failed Deq,Failed Ops\n");

		printf("%10u,%10u,%u,%"PRIu64",%"PRIu64",%"PRIu64",%"PRIu64","
				"%"PRIu64"\n",
				ctx->lcore_id,
				ctx->options->max_buffer_size,
				ctx->options->max_burst_size,
				ops_enqd_total,
				ops_deqd_total,
				ops_enqd_failed,
				ops_deqd_failed,
				ops_failed);
	}

	return 0;
}

void
cperf_verify_test_destructor(void *arg)
{
	struct cperf_verify_ctx *ctx = arg;

	if (ctx == NULL)
		return;

	cperf_verify_test_free(ctx);
}