/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#ifndef _TEST_PERF_COMMON_
#define _TEST_PERF_COMMON_

#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>

#include <rte_cryptodev.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_event_crypto_adapter.h>
#include <rte_event_dma_adapter.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_event_timer_adapter.h>
#include <rte_eventdev.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>

#include "evt_common.h"
#include "evt_options.h"
#include "evt_test.h"

#define TEST_PERF_CA_ID 0
#define TEST_PERF_DA_ID 0

struct test_perf;

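/* Per-worker lcore state. Cache-line aligned so that counters updated by
 * one worker do not false-share with another lcore's data.
 */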
struct __rte_cache_aligned worker_data {
	uint64_t processed_pkts;
	uint64_t latency;
	uint8_t dev_id;
	uint8_t port_id;
	struct test_perf *t;
};

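/* Crypto device, queue pair and session array used by a producer port. */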
struct crypto_adptr_data {
	uint8_t cdev_id;
	uint16_t cdev_qp_id;
	void **crypto_sess;
};

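/* DMA device and virtual channel used by a producer port. */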
struct dma_adptr_data {
	uint8_t dma_dev_id;
	uint16_t vchan_id;
};

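/* Per-producer lcore state: the event device, port and queue to enqueue
 * to, plus any crypto/DMA adapter resources the producer drives.
 */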
struct __rte_cache_aligned prod_data {
	uint8_t dev_id;
	uint8_t port_id;
	uint8_t queue_id;
	struct crypto_adptr_data ca;
	struct dma_adptr_data da;
	struct test_perf *t;
};

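/* Global test context shared by all producer and worker lcores. */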
struct __rte_cache_aligned test_perf {
	/* Don't change the offset of "done". The signal handler uses this
	 * memory to terminate all lcores' work.
	 */
	int done;
	uint64_t outstand_pkts;
	uint8_t nb_workers;
	enum evt_test_result result;
	uint32_t nb_flows;
	uint64_t nb_pkts;
	struct rte_mempool *pool;
	struct prod_data prod[EVT_MAX_PORTS];
	struct worker_data worker[EVT_MAX_PORTS];
	struct evt_options *opt;
	alignas(RTE_CACHE_LINE_SIZE) uint8_t sched_type_list[EVT_MAX_STAGES];
	alignas(RTE_CACHE_LINE_SIZE) struct rte_event_timer_adapter *timer_adptr[
		RTE_EVENT_TIMER_ADAPTER_NUM_MAX];
	struct rte_mempool *ca_op_pool;
	struct rte_mempool *ca_sess_pool;
	struct rte_mempool *ca_asym_sess_pool;
	struct rte_mempool *ca_vector_pool;
};

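/* Event payload carried through the pipeline. The anonymous struct pads
 * "timestamp" to the offset of rte_event_timer::user_meta, so the same
 * layout serves both as an event timer (for timer-adapter producers) and
 * as a timestamp holder for forward-latency measurement.
 */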
struct __rte_cache_aligned perf_elt {
	union {
		struct rte_event_timer tim;
		struct {
			char pad[offsetof(struct rte_event_timer, user_meta)];
			uint64_t timestamp;
		};
	};
};

#define BURST_SIZE 16
#define MAX_PROD_ENQ_BURST_SIZE 128

#define PERF_WORKER_INIT\
	struct worker_data *w = arg;\
	struct test_perf *t = w->t;\
	struct evt_options *opt = t->opt;\
	const uint8_t dev = w->dev_id;\
	const uint8_t port = w->port_id;\
	const uint8_t prod_timer_type = \
		opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR;\
	uint8_t *const sched_type_list = &t->sched_type_list[0];\
	const enum evt_prod_type prod_type = opt->prod_type;\
	struct rte_mempool *const pool = t->pool;\
	const uint8_t nb_stages = t->opt->nb_stages;\
	const uint8_t laststage = nb_stages - 1;\
	uint8_t cnt = 0;\
	alignas(RTE_CACHE_LINE_SIZE) void *bufs[BURST_SIZE];\
	int const sz = RTE_DIM(bufs);\
	uint8_t stage;\
	struct perf_elt *pe = NULL;\
	if (opt->verbose_level > 1)\
		printf("%s(): lcore %d dev_id %d port=%d\n", __func__,\
				rte_lcore_id(), dev, port)

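/* Illustrative worker skeleton (a sketch only; the real workers live in
 * test_perf_queue.c and test_perf_atq.c, and the function name below is
 * hypothetical):
 *
 *	static int
 *	perf_worker(void *arg)
 *	{
 *		PERF_WORKER_INIT;
 *		struct rte_event ev;
 *
 *		while (t->done == 0) {
 *			if (rte_event_dequeue_burst(dev, port, &ev, 1, 0) == 0)
 *				continue;
 *			// ...run the stage; on laststage, free the payload
 *			// via perf_process_last_stage()...
 *		}
 *		return 0;
 *	}
 */

/* Stamp the current timer cycle count into the event payload so the last
 * stage can compute forward latency. The timestamp location depends on the
 * producer type: the mbuf data area for symmetric crypto ops, past the
 * modex result for asymmetric ops, op->user_meta for DMA ops, and the
 * perf_elt itself otherwise.
 */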
static __rte_always_inline void
perf_mark_fwd_latency(enum evt_prod_type prod_type, struct rte_event *const ev)
{
	struct perf_elt *pe;

	if (prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
		struct rte_crypto_op *op = ev->event_ptr;
		struct rte_mbuf *m;

		if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
			if (op->sym->m_dst == NULL)
				m = op->sym->m_src;
			else
				m = op->sym->m_dst;

			pe = rte_pktmbuf_mtod(m, struct perf_elt *);
		} else {
			pe = RTE_PTR_ADD(op->asym->modex.result.data,
					 op->asym->modex.result.length);
		}
		pe->timestamp = rte_get_timer_cycles();
	} else if (prod_type == EVT_PROD_TYPE_EVENT_DMA_ADPTR) {
		struct rte_event_dma_adapter_op *op = ev->event_ptr;

		op->user_meta = rte_get_timer_cycles();
	} else {
		pe = ev->event_ptr;
		pe->timestamp = rte_get_timer_cycles();
	}
}

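/* Drop a crypto op that completed with an error status. Returns the
 * non-zero op status if the op was freed, 0 on success.
 */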
static __rte_always_inline int
perf_handle_crypto_ev(struct rte_event *ev)
{
	struct rte_crypto_op *op = ev->event_ptr;

	if (unlikely(op->status != RTE_CRYPTO_OP_STATUS_SUCCESS)) {
		/* Save the status before the op is freed. */
		int status = op->status;

		rte_crypto_op_free(op);
		return status;
	}

	return 0;
}

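/* Locate the perf_elt holding the timestamp of a crypto event vector. */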
static __rte_always_inline struct perf_elt *
perf_elt_from_vec_get(struct rte_event_vector *vec)
{
	/* Timestamp for vector event stored in first element */
	struct rte_crypto_op *cop = vec->ptrs[0];
	struct rte_mbuf *m;

	if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
		m = cop->sym->m_dst == NULL ? cop->sym->m_src : cop->sym->m_dst;
		return rte_pktmbuf_mtod(m, struct perf_elt *);
	} else {
		return RTE_PTR_ADD(cop->asym->modex.result.data, cop->asym->modex.result.length);
	}
}

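/* Compact a crypto event vector in place, freeing any ops that completed
 * with an error. Returns -ENOENT (and frees the vector) when every op
 * failed; otherwise returns 0 and, if forward latency is enabled, points
 * *pe at the vector's timestamp element.
 */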
static __rte_always_inline int
perf_handle_crypto_vector_ev(struct rte_event *ev, struct perf_elt **pe,
		const int enable_fwd_latency)
{
	struct rte_event_vector *vec = ev->vec;
	struct rte_crypto_op *cop;
	struct rte_mbuf *m;
	int i, n = 0;
	void *data;

	for (i = 0; i < vec->nb_elem; i++) {
		cop = vec->ptrs[i];
		if (unlikely(cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS)) {
			if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
				m = cop->sym->m_dst == NULL ? cop->sym->m_src : cop->sym->m_dst;
				rte_pktmbuf_free(m);
			} else {
				data = cop->asym->modex.result.data;
				rte_mempool_put(rte_mempool_from_obj(data), data);
			}
			rte_crypto_op_free(cop);
			continue;
		}
		vec->ptrs[n++] = cop;
	}

	/* All cops failed, free the vector */
	if (n == 0) {
		rte_mempool_put(rte_mempool_from_obj(vec), vec);
		return -ENOENT;
	}

	vec->nb_elem = n;

	/* Forward latency not enabled - perf data will not be accessed */
	if (!enable_fwd_latency)
		return 0;

	/* Get pointer to perf data */
	*pe = perf_elt_from_vec_get(vec);

	return 0;
}

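/* Last-stage handler: account the completed event and stage its payload
 * for a bulk free back to the mempool once "buf_sz" buffers accumulate.
 * Returns the updated buffer count.
 */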
static __rte_always_inline int
perf_process_last_stage(struct rte_mempool *const pool, enum evt_prod_type prod_type,
			struct rte_event *const ev, struct worker_data *const w,
			void *bufs[], int const buf_sz, uint8_t count)
{
	void *to_free_in_bulk;

	/* The release fence here ensures event_ptr is
	 * stored before updating the number of
	 * processed packets for worker lcores.
	 */
	rte_atomic_thread_fence(rte_memory_order_release);
	w->processed_pkts++;

	if (prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
		struct rte_crypto_op *op = ev->event_ptr;
		struct rte_mbuf *m;

		if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
			if (op->sym->m_dst == NULL)
				m = op->sym->m_src;
			else
				m = op->sym->m_dst;

			to_free_in_bulk = m;
		} else {
			to_free_in_bulk = op->asym->modex.result.data;
		}
		rte_crypto_op_free(op);
	} else {
		to_free_in_bulk = ev->event_ptr;
	}

	bufs[count++] = to_free_in_bulk;
	if (unlikely(count == buf_sz)) {
		count = 0;
		rte_mempool_put_bulk(pool, bufs, buf_sz);
	}

	return count;
}

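/* Same as perf_process_last_stage(), but also folds the event's forward
 * latency (now minus the producer timestamp) into the worker stats.
 */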
static __rte_always_inline uint8_t
perf_process_last_stage_latency(struct rte_mempool *const pool, enum evt_prod_type prod_type,
				struct rte_event *const ev, struct worker_data *const w,
				void *bufs[], int const buf_sz, uint8_t count)
{
	uint64_t latency, tstamp;
	struct perf_elt *pe;
	void *to_free_in_bulk;

	/* The release fence here ensures event_ptr is stored before updating the number of
	 * processed packets for worker lcores.
	 */
	rte_atomic_thread_fence(rte_memory_order_release);
	w->processed_pkts++;

	if (prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
		struct rte_crypto_op *op = ev->event_ptr;
		struct rte_mbuf *m;

		if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
			if (op->sym->m_dst == NULL)
				m = op->sym->m_src;
			else
				m = op->sym->m_dst;

			to_free_in_bulk = m;
			pe = rte_pktmbuf_mtod(m, struct perf_elt *);
		} else {
			pe = RTE_PTR_ADD(op->asym->modex.result.data,
					 op->asym->modex.result.length);
			to_free_in_bulk = op->asym->modex.result.data;
		}
		tstamp = pe->timestamp;
		rte_crypto_op_free(op);
	} else if (prod_type == EVT_PROD_TYPE_EVENT_DMA_ADPTR) {
		struct rte_event_dma_adapter_op *op = ev->event_ptr;

		to_free_in_bulk = op;
		tstamp = op->user_meta;
	} else {
		pe = ev->event_ptr;
		tstamp = pe->timestamp;
		to_free_in_bulk = pe;
	}

	latency = rte_get_timer_cycles() - tstamp;
	w->latency += latency;

	bufs[count++] = to_free_in_bulk;
	if (unlikely(count == buf_sz)) {
		count = 0;
		rte_mempool_put_bulk(pool, bufs, buf_sz);
	}

	return count;
}

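/* Last-stage handler for crypto event vectors: accounts all elements,
 * optionally records forward latency from the first element, then bulk
 * frees the payload buffers, the crypto ops and the vector itself.
 */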
static __rte_always_inline void
perf_process_vector_last_stage(struct rte_mempool *const pool,
		struct rte_mempool *const ca_pool, struct rte_event *const ev,
		struct worker_data *const w, const bool enable_fwd_latency)
{
	struct rte_event_vector *vec = ev->vec;
	struct rte_crypto_op *cop;
	void *bufs[vec->nb_elem];
	struct perf_elt *pe;
	uint64_t latency;
	int i;

	/* The release fence here ensures event_ptr is stored before updating the number of
	 * processed packets for worker lcores.
	 */
	rte_atomic_thread_fence(rte_memory_order_release);
	w->processed_pkts += vec->nb_elem;

	if (enable_fwd_latency) {
		pe = perf_elt_from_vec_get(vec);
		latency = rte_get_timer_cycles() - pe->timestamp;
		w->latency += latency;
	}

	for (i = 0; i < vec->nb_elem; i++) {
		cop = vec->ptrs[i];
		if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			bufs[i] = cop->sym->m_dst == NULL ? cop->sym->m_src : cop->sym->m_dst;
		else
			bufs[i] = cop->asym->modex.result.data;
	}

	rte_mempool_put_bulk(pool, bufs, vec->nb_elem);
	rte_mempool_put_bulk(ca_pool, (void * const *)vec->ptrs, vec->nb_elem);
	rte_mempool_put(rte_mempool_from_obj(vec), vec);
}

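/* One event port per worker lcore plus one per producer lcore. */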
static inline int
perf_nb_event_ports(struct evt_options *opt)
{
	return evt_nr_active_lcores(opt->wlcores) +
			evt_nr_active_lcores(opt->plcores);
}

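/* Setup, launch and teardown helpers shared by the perf_queue and
 * perf_atq tests (implemented in test_perf_common.c).
 */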
int perf_test_result(struct evt_test *test, struct evt_options *opt);
int perf_opt_check(struct evt_options *opt, uint64_t nb_queues);
int perf_test_setup(struct evt_test *test, struct evt_options *opt);
int perf_ethdev_setup(struct evt_test *test, struct evt_options *opt);
int perf_cryptodev_setup(struct evt_test *test, struct evt_options *opt);
int perf_dmadev_setup(struct evt_test *test, struct evt_options *opt);
int perf_mempool_setup(struct evt_test *test, struct evt_options *opt);
int perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
				uint8_t stride, uint8_t nb_queues,
				const struct rte_event_port_conf *port_conf);
int perf_event_dev_service_setup(uint8_t dev_id);
int perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
		int (*worker)(void *));
void perf_opt_dump(struct evt_options *opt, uint8_t nb_queues);
void perf_test_destroy(struct evt_test *test, struct evt_options *opt);
void perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt);
void perf_cryptodev_destroy(struct evt_test *test, struct evt_options *opt);
void perf_dmadev_destroy(struct evt_test *test, struct evt_options *opt);
void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt);
void perf_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt);
void perf_mempool_destroy(struct evt_test *test, struct evt_options *opt);
void perf_worker_cleanup(struct rte_mempool *const pool, uint8_t dev_id,
			 uint8_t port_id, struct rte_event events[],
			 uint16_t nb_enq, uint16_t nb_deq);

#endif /* _TEST_PERF_COMMON_ */