xref: /dpdk/examples/eventdev_pipeline/pipeline_common.h (revision 6d239dd5295a8249a296ae9f0a5bc9802fea073e)
/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 Intel Corporation.
 * Copyright 2017 Cavium, Inc.
 */

#include <stdbool.h>

#include <rte_eal.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_launch.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_service.h>
#include <rte_service_component.h>

#define MAX_NUM_STAGES 8
#define BATCH_SIZE 16
#define MAX_NUM_CORE 64

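/*
 * Per-consumer (TX) port state. The release flag records whether the
 * consumer port was configured without implicit release, i.e. whether the
 * consumer loop must issue RTE_EVENT_OP_RELEASE itself for events it has
 * dequeued.
 */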
struct cons_data {
	uint8_t dev_id;
	uint8_t port_id;
	uint8_t release;
} __rte_cache_aligned;

struct worker_data {
	uint8_t dev_id;
	uint8_t port_id;
} __rte_cache_aligned;

typedef int (*worker_loop)(void *);
typedef int (*consumer_loop)(void);
typedef void (*schedule_loop)(unsigned int);
typedef int (*eventdev_setup)(struct cons_data *, struct worker_data *);
typedef void (*rx_adapter_setup)(uint16_t nb_ports);
typedef void (*opt_check)(void);

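/*
 * Dispatch table of fast-path entry points. One of the
 * set_worker_*_setup_data() functions declared at the bottom of this header
 * fills it in, choosing burst or single-event variants according to the
 * event device's capabilities.
 */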
struct setup_data {
	worker_loop worker;
	consumer_loop consumer;
	schedule_loop scheduler;
	eventdev_setup evdev_setup;
	rx_adapter_setup adptr_setup;
	opt_check check_opt;
};

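/*
 * Shared fast-path state. The per-role core arrays are indexed by lcore id;
 * a non-zero entry means that lcore takes part in the given role. tx_lock
 * serializes the consumer when more than one lcore is assigned to TX.
 */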
struct fastpath_data {
	volatile int done;
	uint32_t tx_lock;
	uint32_t evdev_service_id;
	uint32_t rxadptr_service_id;
	bool rx_single;
	bool tx_single;
	bool sched_single;
	unsigned int rx_core[MAX_NUM_CORE];
	unsigned int tx_core[MAX_NUM_CORE];
	unsigned int sched_core[MAX_NUM_CORE];
	unsigned int worker_core[MAX_NUM_CORE];
	struct rte_eth_dev_tx_buffer *tx_buf[RTE_MAX_ETHPORTS];
	struct setup_data cap;
} __rte_cache_aligned;

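/*
 * Configuration parsed from the command line. The *_lcore_mask fields hold
 * the per-role coremasks from which the core arrays in struct fastpath_data
 * are populated.
 */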
struct config_data {
	unsigned int active_cores;
	unsigned int num_workers;
	int64_t num_packets;
	uint64_t num_mbuf;
	unsigned int num_fids;
	int queue_type;
	int worker_cycles;
	int enable_queue_priorities;
	int quiet;
	int dump_dev;
	int dump_dev_signal;
	int all_type_queues;
	unsigned int num_stages;
	unsigned int worker_cq_depth;
	/* The rx stride value reduces congestion on the entry queue when
	 * multiple eth ports are used, by forming multiple event queue
	 * pipelines.
	 */
	unsigned int rx_stride;
	int16_t next_qid[MAX_NUM_STAGES+2];
	int16_t qid[MAX_NUM_STAGES];
	uint8_t rx_adapter_id;
	uint64_t worker_lcore_mask;
	uint64_t rx_lcore_mask;
	uint64_t tx_lcore_mask;
	uint64_t sched_lcore_mask;
};

struct port_link {
	uint8_t queue_id;
	uint8_t priority;
};

struct cons_data cons_data;

struct fastpath_data *fdata;
struct config_data cdata;

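/* Single shared instances: cons_data and cdata are defined here, while fdata
 * is only a pointer and is expected to be allocated by the application at
 * startup before the fast-path loops run.
 */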
static __rte_always_inline void
exchange_mac(struct rte_mbuf *m)
{
	struct ether_hdr *eth;
	struct ether_addr addr;

	/* change mac addresses on packet (to use mbuf data) */
	eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
	ether_addr_copy(&eth->d_addr, &addr);
	ether_addr_copy(&addr, &eth->s_addr);
}

static __rte_always_inline void
work(void)
{
	/* do a number of cycles of work per packet */
	volatile uint64_t start_tsc = rte_rdtsc();
	while (rte_rdtsc() < start_tsc + cdata.worker_cycles)
		rte_pause();
}

static __rte_always_inline void
schedule_devices(unsigned int lcore_id)
{
	if (fdata->rx_core[lcore_id]) {
		rte_service_run_iter_on_app_lcore(fdata->rxadptr_service_id,
				!fdata->rx_single);
	}

	if (fdata->sched_core[lcore_id]) {
		rte_service_run_iter_on_app_lcore(fdata->evdev_service_id,
				!fdata->sched_single);
		if (cdata.dump_dev_signal) {
			rte_event_dev_dump(0, stdout);
			cdata.dump_dev_signal = 0;
		}
	}

	if (fdata->tx_core[lcore_id] && (fdata->tx_single ||
			 rte_atomic32_cmpset(&(fdata->tx_lock), 0, 1))) {
		fdata->cap.consumer();
		rte_atomic32_clear((rte_atomic32_t *)&(fdata->tx_lock));
	}
}

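/*
 * schedule_devices() is meant to be called once per iteration of each
 * fast-path loop, so that lcores which double as RX, scheduler or TX cores
 * keep those services running. A minimal sketch of such a loop (hypothetical;
 * the actual worker loops live in this example's pipeline_worker_*.c files):
 *
 *	while (!fdata->done) {
 *		struct rte_event ev;
 *		uint16_t nb_rx;
 *
 *		schedule_devices(rte_lcore_id());
 *		nb_rx = rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0);
 *		if (nb_rx == 0) {
 *			rte_pause();
 *			continue;
 *		}
 *		... process the event, then forward or transmit it ...
 *	}
 */
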
void set_worker_generic_setup_data(struct setup_data *caps, bool burst);
void set_worker_tx_setup_data(struct setup_data *caps, bool burst);