/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 Intel Corporation.
 * Copyright 2017 Cavium, Inc.
 */

#include <stdbool.h>

#include <rte_eal.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_launch.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_service.h>
#include <rte_service_component.h>

#define MAX_NUM_STAGES 8
#define BATCH_SIZE 16
#define MAX_NUM_CORE 64

/* Identifies the event device and event port a worker lcore polls. */
struct __rte_cache_aligned worker_data {
	uint8_t dev_id;
	uint8_t port_id;
};

typedef int (*worker_loop)(void *);
typedef void (*schedule_loop)(unsigned int);
typedef int (*eventdev_setup)(struct worker_data *);
typedef void (*adapter_setup)(uint16_t nb_ports);
typedef void (*opt_check)(void);

/*
 * Callbacks for the selected worker mode (generic or tx-enqueue); they are
 * populated via set_worker_generic_setup_data() or
 * set_worker_tx_enq_setup_data(), declared at the end of this header.
 */
struct setup_data {
	worker_loop worker;
	schedule_loop scheduler;
	eventdev_setup evdev_setup;
	adapter_setup adptr_setup;
	opt_check check_opt;
};

/*
 * Runtime state shared by all lcores: service ids for the adapters and the
 * event device scheduler, plus per-lcore flags describing which role(s)
 * each lcore performs.
 */
struct __rte_cache_aligned fastpath_data {
	volatile int done;
	uint32_t evdev_service_id;
	uint32_t rxadptr_service_id;
	uint32_t txadptr_service_id;
	bool rx_single;
	bool tx_single;
	bool sched_single;
	uint64_t rx_core[MAX_NUM_CORE];
	uint64_t tx_core[MAX_NUM_CORE];
	uint64_t sched_core[MAX_NUM_CORE];
	uint64_t worker_core[MAX_NUM_CORE];
	struct setup_data cap;
};

/* Application configuration, mostly parsed from the command line. */
struct config_data {
	unsigned int active_cores;
	unsigned int num_workers;
	int64_t num_packets;
	uint64_t num_mbuf;
	unsigned int num_fids;
	int queue_type;
	int worker_cycles;
	int enable_queue_priorities;
	int quiet;
	int dump_dev;
	int dump_dev_signal;
	int all_type_queues;
	unsigned int num_stages;
	unsigned int worker_cq_depth;
	unsigned int rx_stride;
	/* The Rx stride reduces congestion on the entry queue when multiple
	 * eth ports are used, by forming multiple event queue pipelines
	 * (see the illustrative note after this struct).
	 */
	int16_t next_qid[MAX_NUM_STAGES+2];
	int16_t qid[MAX_NUM_STAGES];
	uint8_t rx_adapter_id;
	uint8_t tx_adapter_id;
	uint8_t tx_queue_id;
	uint64_t worker_lcore_mask;
	uint64_t rx_lcore_mask;
	uint64_t tx_lcore_mask;
	uint64_t sched_lcore_mask;
};
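
/*
 * Illustrative note (not part of the original header), continuing the
 * rx_stride comment above: with, for example, two ethdev ports and a stride
 * equal to the number of stages, port 0 can be injected at qid[0] and port 1
 * at qid[rx_stride], so each port feeds its own chain of stage queues rather
 * than every port contending for the first queue. The exact mapping is
 * applied where the Rx adapter queues are added in the worker-mode source
 * files.
 */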

struct port_link {
	uint8_t queue_id;
	uint8_t priority;
};

extern struct fastpath_data *fdata;
extern struct config_data cdata;

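/*
 * Illustrative sketch (not part of the original header): roughly how main()
 * is expected to consume the setup_data callbacks once a worker mode has
 * been selected with set_worker_generic_setup_data() or
 * set_worker_tx_enq_setup_data(). The helper name and the worker indexing
 * below are hypothetical; the real dispatch lives in main.c.
 */
static inline void
example_launch_pipeline(struct worker_data *workers)
{
	unsigned int lcore_id, widx = 0;

	/* validate options, then create the event queues/ports and the
	 * Rx/Tx adapters for the chosen mode */
	fdata->cap.check_opt();
	fdata->cap.evdev_setup(workers);
	fdata->cap.adptr_setup(rte_eth_dev_count_avail());

	/* launch the selected worker loop on every lcore marked as a worker */
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (fdata->worker_core[lcore_id] &&
		    widx < cdata.num_workers)
			rte_eal_remote_launch(fdata->cap.worker,
					      &workers[widx++], lcore_id);
	}
}
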
static __rte_always_inline void
exchange_mac(struct rte_mbuf *m)
{
	struct rte_ether_hdr *eth;
	struct rte_ether_addr addr;

	/* change mac addresses on packet (to use mbuf data) */
	eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
	rte_ether_addr_copy(&eth->dst_addr, &addr);
	rte_ether_addr_copy(&addr, &eth->dst_addr);
}

static __rte_always_inline void
work(void)
{
	/* do a number of cycles of work per packet */
	volatile uint64_t start_tsc = rte_rdtsc();
	while (rte_rdtsc() < start_tsc + cdata.worker_cycles)
		rte_pause();
}

/*
 * Run the Rx adapter, event device scheduler and Tx adapter services that
 * were assigned to this lcore.
 */
static __rte_always_inline void
schedule_devices(unsigned int lcore_id)
{
	if (fdata->rx_core[lcore_id]) {
		rte_service_run_iter_on_app_lcore(fdata->rxadptr_service_id,
				!fdata->rx_single);
	}

	if (fdata->sched_core[lcore_id]) {
		rte_service_run_iter_on_app_lcore(fdata->evdev_service_id,
				!fdata->sched_single);
		if (cdata.dump_dev_signal) {
			rte_event_dev_dump(0, stdout);
			cdata.dump_dev_signal = 0;
		}
	}

	if (fdata->tx_core[lcore_id]) {
		rte_service_run_iter_on_app_lcore(fdata->txadptr_service_id,
				!fdata->tx_single);
	}
}

static void
event_port_flush(uint8_t dev_id __rte_unused, struct rte_event ev,
		 void *args __rte_unused)
{
	rte_mempool_put(args, ev.event_ptr);
}

/*
 * Free the mbufs of events that were dequeued but not re-enqueued, release
 * the remainder of the burst, and drain the event port before a worker
 * exits.
 */
static inline void
worker_cleanup(uint8_t dev_id, uint8_t port_id, struct rte_event events[],
	       uint16_t nb_enq, uint16_t nb_deq)
{
	int i;

	if (!(nb_deq - nb_enq))
		return;

	if (nb_deq) {
		for (i = nb_enq; i < nb_deq; i++) {
			if (events[i].op == RTE_EVENT_OP_RELEASE)
				continue;
			rte_pktmbuf_free(events[i].mbuf);
		}

		for (i = 0; i < nb_deq; i++)
			events[i].op = RTE_EVENT_OP_RELEASE;
		rte_event_enqueue_burst(dev_id, port_id, events, nb_deq);
	}

	rte_event_port_quiesce(dev_id, port_id, event_port_flush, NULL);
}
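
/*
 * Illustrative sketch (not part of the original header): the rough shape of
 * a worker loop built from the helpers above. The real worker loops in
 * pipeline_worker_generic.c and pipeline_worker_tx.c additionally handle
 * per-stage scheduling types, flow ids and the final TX stage; the function
 * name below is hypothetical.
 */
static __rte_always_inline int
example_worker_loop(void *arg)
{
	struct worker_data *w = arg;
	struct rte_event events[BATCH_SIZE];
	uint16_t i, nb_rx = 0, nb_tx = 0;

	while (!fdata->done) {
		/* run any Rx adapter / scheduler / Tx adapter service this
		 * lcore has been assigned */
		schedule_devices(rte_lcore_id());

		nb_rx = rte_event_dequeue_burst(w->dev_id, w->port_id, events,
						BATCH_SIZE, 0);
		if (nb_rx == 0) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			exchange_mac(events[i].mbuf); /* touch packet data */
			work();                       /* simulate per-packet work */
			/* forward to the next stage queue */
			events[i].queue_id = cdata.next_qid[events[i].queue_id];
			events[i].op = RTE_EVENT_OP_FORWARD;
		}

		nb_tx = rte_event_enqueue_burst(w->dev_id, w->port_id, events,
						nb_rx);
	}

	/* release any events still held by the port before exiting */
	worker_cleanup(w->dev_id, w->port_id, events, nb_tx, nb_rx);
	return 0;
}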

void set_worker_generic_setup_data(struct setup_data *caps, bool burst);
void set_worker_tx_enq_setup_data(struct setup_data *caps, bool burst);