/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdbool.h>

#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_cycles.h>
#include <rte_malloc.h>

#include "cperf_ops.h"
#include "cperf_test_pmd_cyclecount.h"
#include "cperf_test_common.h"

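/*
 * This file implements the "pmd-cyclecount" ptest mode of the
 * dpdk-test-crypto-perf application. It measures the raw CPU cycle cost of
 * building crypto ops, enqueueing them to a PMD and dequeueing them again,
 * and reports the three costs separately, per op.
 *
 * Illustrative invocation (a sketch only; the exact binary name, device and
 * algorithm arguments depend on the DPDK build and hardware at hand):
 *
 *   dpdk-test-crypto-perf -l 1,2 --vdev crypto_aesni_mb -- \
 *       --ptest pmd-cyclecount --devtype crypto_aesni_mb \
 *       --optype cipher-only --cipher-algo aes-cbc --cipher-op encrypt \
 *       --pmd-cyclecount-delay-ms 5
 *
 * The ten columns of the report (pretty or CSV) correspond to the ten fields
 * of the format strings that follow.
 */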
#define PRETTY_HDR_FMT "%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s\n\n"
#define PRETTY_LINE_FMT "%12u%12u%12u%12u%12u%12u%12u%12.0f%12.0f%12.0f\n"
#define CSV_HDR_FMT "%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n"
#define CSV_LINE_FMT "%10u,%10u,%u,%u,%u,%u,%u,%.3f,%.3f,%.3f\n"

struct cperf_pmd_cyclecount_ctx {
	uint8_t dev_id;
	uint16_t qp_id;
	uint8_t lcore_id;

	struct rte_mempool *pkt_mbuf_pool_in;
	struct rte_mempool *pkt_mbuf_pool_out;
	struct rte_mbuf **mbufs_in;
	struct rte_mbuf **mbufs_out;

	struct rte_mempool *crypto_op_pool;
	struct rte_crypto_op **ops;
	struct rte_crypto_op **ops_processed;

	struct rte_cryptodev_sym_session *sess;

	cperf_populate_ops_t populate_ops;

	const struct cperf_options *options;
	const struct cperf_test_vector *test_vector;
};

struct pmd_cyclecount_state {
	struct cperf_pmd_cyclecount_ctx *ctx;
	const struct cperf_options *opts;
	uint32_t lcore;
	uint64_t delay;
	int linearize;
	uint32_t ops_enqd;
	uint32_t ops_deqd;
	uint32_t ops_enq_retries;
	uint32_t ops_deq_retries;
	double cycles_per_build;
	double cycles_per_enq;
	double cycles_per_deq;
};

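/*
 * The per-op IV is written into the op's private data area, immediately
 * after the symmetric op, so the IV offset is the same for every op.
 */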
static const uint16_t iv_offset =
		sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op);

static void
cperf_pmd_cyclecount_test_free(struct cperf_pmd_cyclecount_ctx *ctx)
{
	if (ctx) {
		if (ctx->sess) {
			rte_cryptodev_sym_session_clear(ctx->dev_id, ctx->sess);
			rte_cryptodev_sym_session_free(ctx->sess);
		}

		cperf_free_common_memory(ctx->options,
				ctx->pkt_mbuf_pool_in,
				ctx->pkt_mbuf_pool_out,
				ctx->mbufs_in, ctx->mbufs_out,
				ctx->crypto_op_pool);
		if (ctx->ops)
			rte_free(ctx->ops);

		if (ctx->ops_processed)
			rte_free(ctx->ops_processed);

		rte_free(ctx);
	}
}

void *
cperf_pmd_cyclecount_test_constructor(struct rte_mempool *sess_mp,
		uint8_t dev_id, uint16_t qp_id,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		const struct cperf_op_fns *op_fns)
{
	struct cperf_pmd_cyclecount_ctx *ctx = NULL;

	/* preallocate buffers for crypto ops as they can get quite big */
	size_t alloc_sz = sizeof(struct rte_crypto_op *) *
			options->nb_descriptors;

	/* zero the context so the error path can safely free unset members */
	ctx = rte_zmalloc(NULL, sizeof(struct cperf_pmd_cyclecount_ctx), 0);
	if (ctx == NULL)
		goto err;

	ctx->dev_id = dev_id;
	ctx->qp_id = qp_id;

	ctx->populate_ops = op_fns->populate_ops;
	ctx->options = options;
	ctx->test_vector = test_vector;

	ctx->sess = op_fns->sess_create(
			sess_mp, dev_id, options, test_vector, iv_offset);
	if (ctx->sess == NULL)
		goto err;

	if (cperf_alloc_common_memory(options, test_vector, dev_id, 0,
			&ctx->pkt_mbuf_pool_in, &ctx->pkt_mbuf_pool_out,
			&ctx->mbufs_in, &ctx->mbufs_out,
			&ctx->crypto_op_pool) < 0)
		goto err;

	ctx->ops = rte_malloc("ops", alloc_sz, 0);
	if (!ctx->ops)
		goto err;

	ctx->ops_processed = rte_malloc("ops_processed", alloc_sz, 0);
	if (!ctx->ops_processed)
		goto err;

	return ctx;

err:
	cperf_pmd_cyclecount_test_free(ctx);

	return NULL;
}

/* benchmark alloc-build-free of ops */
static inline int
pmd_cyclecount_bench_ops(struct pmd_cyclecount_state *state, uint32_t cur_op,
		uint16_t test_burst_size)
{
	uint32_t iter_ops_left = state->opts->total_ops - cur_op;
	uint32_t iter_ops_needed =
			RTE_MIN(state->opts->nb_descriptors, iter_ops_left);
	uint32_t cur_iter_op;

	for (cur_iter_op = 0; cur_iter_op < iter_ops_needed;
			cur_iter_op += test_burst_size) {
		/* clamp the burst to the ops remaining in this iteration */
		uint32_t burst_size = RTE_MIN(iter_ops_needed - cur_iter_op,
				test_burst_size);
		struct rte_crypto_op **ops = &state->ctx->ops[cur_iter_op];

		if (burst_size != rte_crypto_op_bulk_alloc(
				state->ctx->crypto_op_pool,
				RTE_CRYPTO_OP_TYPE_SYMMETRIC,
				ops, burst_size))
			return -1;

		/* Setup crypto op, attach mbuf etc */
		(state->ctx->populate_ops)(ops,
				&state->ctx->mbufs_in[cur_iter_op],
				&state->ctx->mbufs_out[cur_iter_op], burst_size,
				state->ctx->sess, state->opts,
				state->ctx->test_vector, iv_offset);

#ifdef CPERF_LINEARIZATION_ENABLE
		/* Check if source mbufs require coalescing */
		if (state->linearize) {
			uint32_t i;
			for (i = 0; i < burst_size; i++) {
				struct rte_mbuf *src = ops[i]->sym->m_src;
				rte_pktmbuf_linearize(src);
			}
		}
#endif /* CPERF_LINEARIZATION_ENABLE */
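		/*
		 * Only the alloc/build cost is measured here, so return the
		 * ops to the pool right away instead of enqueueing them.
		 */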
		rte_mempool_put_bulk(state->ctx->crypto_op_pool, (void **)ops,
				burst_size);
	}

	return 0;
}

/* allocate and build ops (no free) */
static int
pmd_cyclecount_build_ops(struct pmd_cyclecount_state *state,
		uint32_t iter_ops_needed, uint16_t test_burst_size)
{
	uint32_t cur_iter_op;

	for (cur_iter_op = 0; cur_iter_op < iter_ops_needed;
			cur_iter_op += test_burst_size) {
		uint32_t burst_size = RTE_MIN(
				iter_ops_needed - cur_iter_op, test_burst_size);
		struct rte_crypto_op **ops = &state->ctx->ops[cur_iter_op];

		if (burst_size != rte_crypto_op_bulk_alloc(
				state->ctx->crypto_op_pool,
				RTE_CRYPTO_OP_TYPE_SYMMETRIC,
				ops, burst_size))
			return -1;

		/* Setup crypto op, attach mbuf etc */
		(state->ctx->populate_ops)(ops,
				&state->ctx->mbufs_in[cur_iter_op],
				&state->ctx->mbufs_out[cur_iter_op], burst_size,
				state->ctx->sess, state->opts,
				state->ctx->test_vector, iv_offset);
	}
	return 0;
}

/* benchmark enqueue, returns number of ops enqueued */
static uint32_t
pmd_cyclecount_bench_enq(struct pmd_cyclecount_state *state,
		uint32_t iter_ops_needed, uint16_t test_burst_size)
{
	/* Enqueue full descriptor ring of ops on crypto device */
	uint32_t cur_iter_op = 0;
	while (cur_iter_op < iter_ops_needed) {
		uint32_t burst_size = RTE_MIN(iter_ops_needed - cur_iter_op,
				test_burst_size);
		struct rte_crypto_op **ops = &state->ctx->ops[cur_iter_op];
		uint32_t burst_enqd;

		burst_enqd = rte_cryptodev_enqueue_burst(state->ctx->dev_id,
				state->ctx->qp_id, ops, burst_size);

		/* if we couldn't enqueue anything, the queue is full */
		if (!burst_enqd) {
			/* don't try to dequeue anything we didn't enqueue */
			return cur_iter_op;
		}

		if (burst_enqd < burst_size)
			state->ops_enq_retries++;
		state->ops_enqd += burst_enqd;
		cur_iter_op += burst_enqd;
	}
	return iter_ops_needed;
}

/* benchmark dequeue */
static void
pmd_cyclecount_bench_deq(struct pmd_cyclecount_state *state,
		uint32_t iter_ops_needed, uint16_t test_burst_size)
{
	/* Dequeue full descriptor ring of ops on crypto device */
	uint32_t cur_iter_op = 0;
	while (cur_iter_op < iter_ops_needed) {
		uint32_t burst_size = RTE_MIN(iter_ops_needed - cur_iter_op,
				test_burst_size);
		/*
		 * Dequeue into the separate ops_processed array so that
		 * ctx->ops keeps the originally allocated pointers for the
		 * caller's bulk free.
		 */
		struct rte_crypto_op **ops_processed =
				&state->ctx->ops_processed[cur_iter_op];
		uint32_t burst_deqd;

		burst_deqd = rte_cryptodev_dequeue_burst(state->ctx->dev_id,
				state->ctx->qp_id, ops_processed, burst_size);

		if (burst_deqd < burst_size)
			state->ops_deq_retries++;
		state->ops_deqd += burst_deqd;
		cur_iter_op += burst_deqd;
	}
}

/* run benchmark per burst size */
static inline int
pmd_cyclecount_bench_burst_sz(
		struct pmd_cyclecount_state *state, uint16_t test_burst_size)
{
	uint64_t tsc_start;
	uint64_t tsc_end;
	uint64_t tsc_op;
	uint64_t tsc_enq;
	uint64_t tsc_deq;
	uint32_t cur_op;

	/* reset all counters */
	tsc_enq = 0;
	tsc_deq = 0;
	state->ops_enqd = 0;
	state->ops_enq_retries = 0;
	state->ops_deqd = 0;
	state->ops_deq_retries = 0;

	/*
	 * Benchmark crypto op alloc-build-free separately.
	 */
	tsc_start = rte_rdtsc_precise();

	for (cur_op = 0; cur_op < state->opts->total_ops;
			cur_op += state->opts->nb_descriptors) {
		if (unlikely(pmd_cyclecount_bench_ops(
				state, cur_op, test_burst_size)))
			return -1;
	}

	tsc_end = rte_rdtsc_precise();
	tsc_op = tsc_end - tsc_start;

	/*
	 * Hardware acceleration cyclecount benchmarking loop.
	 *
	 * We're benchmarking raw enq/deq performance by filling up the device
	 * queue, so we never get any failed enqs unless the driver won't accept
	 * the exact number of descriptors we requested, or the driver won't
	 * wrap around the end of the TX ring. However, since we're only
	 * dequeueing once we've filled up the queue, we have to benchmark it
	 * piecemeal and then average out the results.
	 */
	cur_op = 0;
	while (cur_op < state->opts->total_ops) {
		uint32_t iter_ops_left = state->opts->total_ops - cur_op;
		uint32_t iter_ops_needed = RTE_MIN(
				state->opts->nb_descriptors, iter_ops_left);
		uint32_t iter_ops_allocd = iter_ops_needed;

		/* allocate and build ops */
		if (unlikely(pmd_cyclecount_build_ops(state, iter_ops_needed,
				test_burst_size)))
			return -1;

		tsc_start = rte_rdtsc_precise();

		/* fill up TX ring */
		iter_ops_needed = pmd_cyclecount_bench_enq(state,
				iter_ops_needed, test_burst_size);

		tsc_end = rte_rdtsc_precise();

		tsc_enq += tsc_end - tsc_start;

		/* allow for HW to catch up */
		if (state->delay)
			rte_delay_us_block(state->delay);

		tsc_start = rte_rdtsc_precise();

		/* drain RX ring */
		pmd_cyclecount_bench_deq(state, iter_ops_needed,
				test_burst_size);

		tsc_end = rte_rdtsc_precise();

		tsc_deq += tsc_end - tsc_start;

		cur_op += iter_ops_needed;

		/*
		 * we may not have processed all ops that we allocated, so
		 * free everything we've allocated.
		 */
		rte_mempool_put_bulk(state->ctx->crypto_op_pool,
				(void **)state->ctx->ops, iter_ops_allocd);
	}

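	/*
	 * Average out the accumulated cycle counts: build cost over every op
	 * requested, enqueue/dequeue cost over the ops actually
	 * enqueued/dequeued.
	 */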
	state->cycles_per_build = (double)tsc_op / state->opts->total_ops;
	state->cycles_per_enq = (double)tsc_enq / state->ops_enqd;
	state->cycles_per_deq = (double)tsc_deq / state->ops_deqd;

	return 0;
}

int
cperf_pmd_cyclecount_test_runner(void *test_ctx)
{
	struct pmd_cyclecount_state state = {0};
	const struct cperf_options *opts;
	uint16_t test_burst_size;
	uint8_t burst_size_idx = 0;

	state.ctx = test_ctx;
	opts = state.ctx->options;
	state.opts = opts;
	state.lcore = rte_lcore_id();
	state.linearize = 0;

	static int only_once;
	static bool warmup = true;

	/*
	 * We need a small delay to allow for hardware to process all the crypto
	 * operations. We can't automatically figure out what the delay should
	 * be, so we leave it up to the user (by default it's 0).
	 */
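	/*
	 * pmdcc_delay is expressed in milliseconds (set via the
	 * --pmd-cyclecount-delay-ms option), while rte_delay_us_block() takes
	 * microseconds, hence the conversion.
	 */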
	state.delay = 1000 * opts->pmdcc_delay;

#ifdef CPERF_LINEARIZATION_ENABLE
	struct rte_cryptodev_info dev_info;

	/* Check if source mbufs require coalescing */
	if (opts->segments_nb > 1) {
		rte_cryptodev_info_get(state.ctx->dev_id, &dev_info);
		if ((dev_info.feature_flags &
				RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0) {
			state.linearize = 1;
		}
	}
#endif /* CPERF_LINEARIZATION_ENABLE */

	state.ctx->lcore_id = state.lcore;

	/* Get first size from range or list */
	if (opts->inc_burst_size != 0)
		test_burst_size = opts->min_burst_size;
	else
		test_burst_size = opts->burst_size_list[0];

	while (test_burst_size <= opts->max_burst_size) {
		/* do a benchmark run */
		if (pmd_cyclecount_bench_burst_sz(&state, test_burst_size))
			return -1;

		/*
		 * The first run is always a warm-up run: discard its results
		 * and re-run the same burst size for the real measurement.
		 */
		if (warmup) {
			warmup = false;
			continue;
		}

		if (!opts->csv) {
			if (!only_once)
				printf(PRETTY_HDR_FMT, "lcore id", "Buf Size",
						"Burst Size", "Enqueued",
						"Dequeued", "Enq Retries",
						"Deq Retries", "Cycles/Op",
						"Cycles/Enq", "Cycles/Deq");
			only_once = 1;

			printf(PRETTY_LINE_FMT, state.ctx->lcore_id,
					opts->test_buffer_size, test_burst_size,
					state.ops_enqd, state.ops_deqd,
					state.ops_enq_retries,
					state.ops_deq_retries,
					state.cycles_per_build,
					state.cycles_per_enq,
					state.cycles_per_deq);
		} else {
			if (!only_once)
				printf(CSV_HDR_FMT, "# lcore id", "Buf Size",
						"Burst Size", "Enqueued",
						"Dequeued", "Enq Retries",
						"Deq Retries", "Cycles/Op",
						"Cycles/Enq", "Cycles/Deq");
			only_once = 1;

			printf(CSV_LINE_FMT, state.ctx->lcore_id,
					opts->test_buffer_size, test_burst_size,
					state.ops_enqd, state.ops_deqd,
					state.ops_enq_retries,
					state.ops_deq_retries,
					state.cycles_per_build,
					state.cycles_per_enq,
					state.cycles_per_deq);
		}

		/* Get next size from range or list */
		if (opts->inc_burst_size != 0)
			test_burst_size += opts->inc_burst_size;
		else {
			if (++burst_size_idx == opts->burst_size_count)
				break;
			test_burst_size = opts->burst_size_list[burst_size_idx];
		}
	}

	return 0;
}

void
cperf_pmd_cyclecount_test_destructor(void *arg)
{
	struct cperf_pmd_cyclecount_ctx *ctx = arg;

	if (ctx == NULL)
		return;

	cperf_pmd_cyclecount_test_free(ctx);
}