xref: /dpdk/app/test-crypto-perf/cperf_test_verify.c (revision f8dbaebbf1c9efcbb2e2354b341ed62175466a57)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>

#include "cperf_test_verify.h"
#include "cperf_ops.h"
#include "cperf_test_common.h"

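/*
 * Per-worker state for one verify-test instance: the device and queue
 * pair this lcore drives, the combined crypto-op/mbuf mempool, the
 * symmetric session, and the offsets of the source and destination
 * mbufs within each pool object.
 */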
struct cperf_verify_ctx {
	uint8_t dev_id;
	uint16_t qp_id;
	uint8_t lcore_id;

	struct rte_mempool *pool;

	struct rte_cryptodev_sym_session *sess;

	cperf_populate_ops_t populate_ops;

	uint32_t src_buf_offset;
	uint32_t dst_buf_offset;

	const struct cperf_options *options;
	const struct cperf_test_vector *test_vector;
};

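/* Completion status of a single op; not referenced elsewhere in this file. */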
struct cperf_op_result {
	enum rte_crypto_op_status status;
};

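/*
 * Release everything the constructor set up: clear and free the
 * session first, then the op/mbuf pool, then the context itself.
 * Safe to call with a partially initialized context.
 */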
static void
cperf_verify_test_free(struct cperf_verify_ctx *ctx)
{
	if (ctx) {
		if (ctx->sess) {
			rte_cryptodev_sym_session_clear(ctx->dev_id, ctx->sess);
			rte_cryptodev_sym_session_free(ctx->sess);
		}

		if (ctx->pool)
			rte_mempool_free(ctx->pool);

		rte_free(ctx);
	}
}

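/*
 * Create the verify-test context for one (device, queue pair) worker:
 * allocate the context, create the symmetric session through the
 * op-type specific callback, and set up the common op/mbuf pool.
 * Each crypto op carries its IV in the private area following the two
 * fixed headers:
 *
 *   | rte_crypto_op | rte_crypto_sym_op | IV ... |
 *                                       ^-- iv_offset
 */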
void *
cperf_verify_test_constructor(struct rte_mempool *sess_mp,
		struct rte_mempool *sess_priv_mp,
		uint8_t dev_id, uint16_t qp_id,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		const struct cperf_op_fns *op_fns)
{
	struct cperf_verify_ctx *ctx = NULL;

	ctx = rte_malloc(NULL, sizeof(struct cperf_verify_ctx), 0);
	if (ctx == NULL)
		goto err;

	ctx->dev_id = dev_id;
	ctx->qp_id = qp_id;

	ctx->populate_ops = op_fns->populate_ops;
	ctx->options = options;
	ctx->test_vector = test_vector;

	/* IV goes at the end of the crypto operation */
	uint16_t iv_offset = sizeof(struct rte_crypto_op) +
		sizeof(struct rte_crypto_sym_op);

	ctx->sess = op_fns->sess_create(sess_mp, sess_priv_mp, dev_id, options,
			test_vector, iv_offset);
	if (ctx->sess == NULL)
		goto err;

	if (cperf_alloc_common_memory(options, test_vector, dev_id, qp_id, 0,
			&ctx->src_buf_offset, &ctx->dst_buf_offset,
			&ctx->pool) < 0)
		goto err;

	return ctx;
err:
	cperf_verify_test_free(ctx);

	return NULL;
}

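/*
 * Verify one dequeued op against the test vector: flatten the
 * (possibly segmented) output mbuf chain into a temporary buffer and
 * memcmp the cipher and/or auth regions against the expected data.
 * Returns 0 on match, 1 on any mismatch or failed op.
 */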
static int
cperf_verify_op(struct rte_crypto_op *op,
		const struct cperf_options *options,
		const struct cperf_test_vector *vector)
{
	const struct rte_mbuf *m;
	uint32_t len;
	uint16_t nb_segs;
	uint8_t *data;
	uint32_t cipher_offset, auth_offset;
	uint8_t	cipher, auth;
	int res = 0;

	if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
		return 1;

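	/* First pass over the chain: total the segment lengths so a flat
	 * buffer can be allocated.
	 */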
	if (op->sym->m_dst)
		m = op->sym->m_dst;
	else
		m = op->sym->m_src;
	nb_segs = m->nb_segs;
	len = 0;
	while (m && nb_segs != 0) {
		len += m->data_len;
		m = m->next;
		nb_segs--;
	}

	data = rte_malloc(NULL, len, 0);
	if (data == NULL)
		return 1;

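	/* Second pass: copy the segments into the flat buffer, reusing
	 * len as the running write offset.
	 */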
	if (op->sym->m_dst)
		m = op->sym->m_dst;
	else
		m = op->sym->m_src;
	nb_segs = m->nb_segs;
	len = 0;
	while (m && nb_segs != 0) {
		memcpy(data + len, rte_pktmbuf_mtod(m, uint8_t *),
				m->data_len);
		len += m->data_len;
		m = m->next;
		nb_segs--;
	}

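	/* Map the op type onto the regions to check: cipher output sits
	 * at offset 0, the digest directly after the test buffer.
	 */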
	switch (options->op_type) {
	case CPERF_CIPHER_ONLY:
		cipher = 1;
		cipher_offset = 0;
		auth = 0;
		auth_offset = 0;
		break;
	case CPERF_CIPHER_THEN_AUTH:
		cipher = 1;
		cipher_offset = 0;
		auth = 1;
		auth_offset = options->test_buffer_size;
		break;
	case CPERF_AUTH_ONLY:
		cipher = 0;
		cipher_offset = 0;
		auth = 1;
		auth_offset = options->test_buffer_size;
		break;
	case CPERF_AUTH_THEN_CIPHER:
		cipher = 1;
		cipher_offset = 0;
		auth = 1;
		auth_offset = options->test_buffer_size;
		break;
	case CPERF_AEAD:
		cipher = 1;
		cipher_offset = 0;
		auth = 1;
		auth_offset = options->test_buffer_size;
		break;
	default:
		res = 1;
		goto out;
	}

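	/* Encrypt ops must yield the vector's ciphertext, decrypt ops its
	 * plaintext. Only a generated digest is compared here; on verify
	 * the PMD has already checked it and reported via op->status.
	 */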
	if (cipher == 1) {
		if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
			res += memcmp(data + cipher_offset,
					vector->ciphertext.data,
					options->test_buffer_size);
		else
			res += memcmp(data + cipher_offset,
					vector->plaintext.data,
					options->test_buffer_size);
	}

	if (auth == 1) {
		if (options->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE)
			res += memcmp(data + auth_offset,
					vector->digest.data,
					options->digest_sz);
	}

out:
	rte_free(data);
	return !!res;
}

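/*
 * Overwrite the source mbuf chain, segment by segment, with the
 * reference input from the test vector (plaintext when encrypting,
 * ciphertext when decrypting) so the device output can be compared
 * against the vector afterwards.
 */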
static void
cperf_mbuf_set(struct rte_mbuf *mbuf,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector)
{
	uint32_t segment_sz = options->segment_sz;
	uint8_t *mbuf_data;
	uint8_t *test_data;
	uint32_t remaining_bytes = options->max_buffer_size;

	if (options->op_type == CPERF_AEAD) {
		test_data = (options->aead_op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
					test_vector->plaintext.data :
					test_vector->ciphertext.data;
	} else {
		test_data =
			(options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				test_vector->plaintext.data :
				test_vector->ciphertext.data;
	}

	while (remaining_bytes) {
		mbuf_data = rte_pktmbuf_mtod(mbuf, uint8_t *);

		if (remaining_bytes <= segment_sz) {
			memcpy(mbuf_data, test_data, remaining_bytes);
			return;
		}

		memcpy(mbuf_data, test_data, segment_sz);
		remaining_bytes -= segment_sz;
		test_data += segment_sz;
		mbuf = mbuf->next;
	}
}

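/*
 * Worker body run on each crypto lcore: build bursts of ops from the
 * test vector, enqueue them, dequeue completions and verify each one,
 * then drain whatever is still in flight. Verification failures are
 * counted and reported, not fatal; returns -1 only if the op pool is
 * exhausted, 0 otherwise.
 */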
int
cperf_verify_test_runner(void *test_ctx)
{
	struct cperf_verify_ctx *ctx = test_ctx;

	uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0;
	uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
	uint64_t ops_failed = 0;

	static uint16_t display_once;

	uint64_t i;
	uint16_t ops_unused = 0;
	uint32_t imix_idx = 0;

	struct rte_crypto_op *ops[ctx->options->max_burst_size];
	struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];

	uint32_t lcore = rte_lcore_id();

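	/*
	 * CPERF_LINEARIZATION_ENABLE is a compile-time opt-in: when the
	 * PMD advertises no scatter-gather support and the source buffers
	 * are segmented, they are linearized before each enqueue below.
	 */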
#ifdef CPERF_LINEARIZATION_ENABLE
	struct rte_cryptodev_info dev_info;
	int linearize = 0;

	/* Check if source mbufs require coalescing */
	if (ctx->options->segment_sz < ctx->options->max_buffer_size) {
		rte_cryptodev_info_get(ctx->dev_id, &dev_info);
		if ((dev_info.feature_flags &
				RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0)
			linearize = 1;
	}
#endif /* CPERF_LINEARIZATION_ENABLE */

	ctx->lcore_id = lcore;

	if (!ctx->options->csv)
		printf("\n# Running verify test on device: %u, lcore: %u\n",
			ctx->dev_id, lcore);

	uint16_t iv_offset = sizeof(struct rte_crypto_op) +
		sizeof(struct rte_crypto_sym_op);

	while (ops_enqd_total < ctx->options->total_ops) {

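		/*
		 * Clamp the burst so the running total never exceeds
		 * total_ops, and top up only what the previous iteration
		 * left unused (ops_unused) with fresh allocations.
		 */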
		uint16_t burst_size = ((ops_enqd_total + ctx->options->max_burst_size)
				<= ctx->options->total_ops) ?
						ctx->options->max_burst_size :
						ctx->options->total_ops -
						ops_enqd_total;

		uint16_t ops_needed = burst_size - ops_unused;

		/* Allocate objects containing crypto operations and mbufs */
		if (rte_mempool_get_bulk(ctx->pool, (void **)ops,
					ops_needed) != 0) {
			RTE_LOG(ERR, USER1,
				"Failed to allocate more crypto operations "
				"from the crypto operation pool.\n"
				"Consider increasing the pool size "
				"with --pool-sz\n");
			return -1;
		}

		/* Set up the crypto ops and attach the mbufs */
		(ctx->populate_ops)(ops, ctx->src_buf_offset,
				ctx->dst_buf_offset,
				ops_needed, ctx->sess, ctx->options,
				ctx->test_vector, iv_offset, &imix_idx, NULL);

		/* Populate the mbufs with the test vector, for verification */
		for (i = 0; i < ops_needed; i++)
			cperf_mbuf_set(ops[i]->sym->m_src,
					ctx->options,
					ctx->test_vector);

#ifdef CPERF_LINEARIZATION_ENABLE
		if (linearize) {
			/* PMD doesn't support scatter-gather and source buffer
			 * is segmented.
			 * We need to linearize it before enqueuing.
			 */
			for (i = 0; i < burst_size; i++)
				rte_pktmbuf_linearize(ops[i]->sym->m_src);
		}
#endif /* CPERF_LINEARIZATION_ENABLE */

		/* Enqueue burst of ops on crypto device */
		ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
				ops, burst_size);
		if (ops_enqd < burst_size)
			ops_enqd_failed++;

		/*
		 * Calculate the number of ops not enqueued (mainly for hw
		 * accelerators whose ingress queue can fill up).
		 */
		ops_unused = burst_size - ops_enqd;
		ops_enqd_total += ops_enqd;

		/* Dequeue processed burst of ops from crypto device */
		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
				ops_processed, ctx->options->max_burst_size);

		if (ops_deqd == 0) {
			/*
			 * Count dequeue polls which didn't return any
			 * processed operations. This statistic is mainly
			 * relevant to hw accelerators.
			 */
			ops_deqd_failed++;
			continue;
		}

		for (i = 0; i < ops_deqd; i++) {
			if (cperf_verify_op(ops_processed[i], ctx->options,
						ctx->test_vector))
				ops_failed++;
		}
		/* Free crypto ops so they can be reused. */
		rte_mempool_put_bulk(ctx->pool,
					(void **)ops_processed, ops_deqd);
		ops_deqd_total += ops_deqd;
	}

	/* Dequeue any operations still in flight on the crypto device */
	while (ops_deqd_total < ctx->options->total_ops) {
		/* Send a zero-length burst to flush sw crypto devices */
		rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);

		/* dequeue burst */
		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
				ops_processed, ctx->options->max_burst_size);
		if (ops_deqd == 0) {
			ops_deqd_failed++;
			continue;
		}

		for (i = 0; i < ops_deqd; i++) {
			if (cperf_verify_op(ops_processed[i], ctx->options,
						ctx->test_vector))
				ops_failed++;
		}
		/* Free crypto ops so they can be reused. */
		rte_mempool_put_bulk(ctx->pool,
					(void **)ops_processed, ops_deqd);
		ops_deqd_total += ops_deqd;
	}

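	/*
	 * display_once guarantees that only the first lcore to get here
	 * prints the header line; every lcore then prints its own row.
	 */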
	uint16_t exp = 0;
	if (!ctx->options->csv) {
		if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
				__ATOMIC_RELAXED, __ATOMIC_RELAXED))
			printf("%12s%12s%12s%12s%12s%12s%12s%12s\n\n",
				"lcore id", "Buf Size", "Burst size",
				"Enqueued", "Dequeued", "Failed Enq",
				"Failed Deq", "Failed Ops");

		printf("%12u%12u%12u%12"PRIu64"%12"PRIu64"%12"PRIu64
				"%12"PRIu64"%12"PRIu64"\n",
				ctx->lcore_id,
				ctx->options->max_buffer_size,
				ctx->options->max_burst_size,
				ops_enqd_total,
				ops_deqd_total,
				ops_enqd_failed,
				ops_deqd_failed,
				ops_failed);
	} else {
		if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
				__ATOMIC_RELAXED, __ATOMIC_RELAXED))
			printf("\n# lcore id, Buffer Size(B), "
				"Burst Size,Enqueued,Dequeued,Failed Enq,"
				"Failed Deq,Failed Ops\n");

		printf("%10u,%10u,%u,%"PRIu64",%"PRIu64",%"PRIu64",%"PRIu64","
				"%"PRIu64"\n",
				ctx->lcore_id,
				ctx->options->max_buffer_size,
				ctx->options->max_burst_size,
				ops_enqd_total,
				ops_deqd_total,
				ops_enqd_failed,
				ops_deqd_failed,
				ops_failed);
	}

	return 0;
}
void
cperf_verify_test_destructor(void *arg)
{
	struct cperf_verify_ctx *ctx = arg;

	if (ctx == NULL)
		return;

	cperf_verify_test_free(ctx);
}
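
/*
 * Usage sketch: these three entry points form the verify entry of the
 * test map that main.c dispatches on. The struct and enum names below
 * follow cperf.h/main.c in this tree and may differ across DPDK
 * releases; shown here for orientation only:
 *
 *	const struct cperf_test cperf_testmap[] = {
 *		...
 *		[CPERF_TEST_TYPE_VERIFY] = {
 *			cperf_verify_test_constructor,
 *			cperf_verify_test_runner,
 *			cperf_verify_test_destructor
 *		},
 *		...
 *	};
 */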
439