xref: /dpdk/examples/eventdev_pipeline/pipeline_common.h (revision 6d239dd5295a8249a296ae9f0a5bc9802fea073e)
1 /*
2  * SPDX-License-Identifier: BSD-3-Clause
3  * Copyright 2016 Intel Corporation.
4  * Copyright 2017 Cavium, Inc.
5  */
6 
7 #include <stdbool.h>
8 
9 #include <rte_eal.h>
10 #include <rte_mempool.h>
11 #include <rte_mbuf.h>
12 #include <rte_launch.h>
13 #include <rte_malloc.h>
14 #include <rte_random.h>
15 #include <rte_cycles.h>
16 #include <rte_ethdev.h>
17 #include <rte_eventdev.h>
18 #include <rte_event_eth_rx_adapter.h>
19 #include <rte_service.h>
20 #include <rte_service_component.h>
21 
#define MAX_NUM_STAGES 8	/* max pipeline stages (sizes qid[] arrays below) */
#define BATCH_SIZE 16		/* events handled per burst */
#define MAX_NUM_CORE 64		/* max lcore id tracked in the per-core role arrays */
25 
/*
 * Parameters for the consumer (TX) side: which event device and port it
 * uses, and whether events need an explicit release operation.
 */
struct cons_data {
	uint8_t dev_id;		/* event device id */
	uint8_t port_id;	/* event port the consumer dequeues from */
	uint8_t release;	/* non-zero: release events after use -- TODO confirm against consumer loop */
} __rte_cache_aligned;
31 
/* Per-worker parameters: the event device and port this worker polls. */
struct worker_data {
	uint8_t dev_id;		/* event device id */
	uint8_t port_id;	/* event port owned by this worker */
} __rte_cache_aligned;
36 
/* Callback types for one pipeline variant; a concrete set is installed
 * into struct setup_data by the set_worker_*_setup_data() functions.
 */
typedef int (*worker_loop)(void *);	/* worker lcore main loop */
typedef int (*consumer_loop)(void);	/* TX/consumer iteration (see schedule_devices()) */
typedef void (*schedule_loop)(unsigned int);	/* scheduler iteration, takes lcore id */
typedef int (*eventdev_setup)(struct cons_data *, struct worker_data *);	/* device/queue/port setup */
typedef void (*rx_adapter_setup)(uint16_t nb_ports);	/* ethdev RX adapter setup */
typedef void (*opt_check)(void);	/* validate command-line options for this mode */
43 
/*
 * The set of callbacks implementing one pipeline variant; invoked through
 * fdata->cap (e.g. fdata->cap.consumer() in schedule_devices()).
 */
struct setup_data {
	worker_loop worker;		/* worker lcore entry point */
	consumer_loop consumer;		/* TX/consumer iteration */
	schedule_loop scheduler;	/* scheduler iteration */
	eventdev_setup evdev_setup;	/* eventdev configuration */
	rx_adapter_setup adptr_setup;	/* RX adapter configuration */
	opt_check check_opt;		/* option validation */
};
52 
/*
 * Fast-path state shared by all lcores; a single cache-aligned instance
 * is pointed to by 'fdata' below.
 */
struct fastpath_data {
	volatile int done;		/* presumably set to signal shutdown -- TODO confirm in main loop */
	uint32_t tx_lock;		/* CAS "lock" serializing the consumer when TX is multi-core
					 * (see schedule_devices()) */
	uint32_t evdev_service_id;	/* eventdev scheduler service id */
	uint32_t rxadptr_service_id;	/* ethdev RX adapter service id */
	bool rx_single;			/* true: exactly one lcore runs RX (service not serialized) */
	bool tx_single;			/* true: exactly one lcore runs TX (tx_lock not taken) */
	bool sched_single;		/* true: exactly one lcore runs the scheduler */
	/* Per-lcore role flags, indexed by lcore id (non-zero = lcore has role). */
	unsigned int rx_core[MAX_NUM_CORE];
	unsigned int tx_core[MAX_NUM_CORE];
	unsigned int sched_core[MAX_NUM_CORE];
	unsigned int worker_core[MAX_NUM_CORE];
	struct rte_eth_dev_tx_buffer *tx_buf[RTE_MAX_ETHPORTS];	/* per-port TX buffers */
	struct setup_data cap;		/* callbacks for the selected pipeline variant */
} __rte_cache_aligned;
68 
/* Runtime/command-line configuration shared by all lcores (see 'cdata'). */
struct config_data {
	unsigned int active_cores;	/* number of lcores in use */
	unsigned int num_workers;	/* number of worker lcores */
	int64_t num_packets;		/* packet budget; signed -- presumably negative means run forever, TODO confirm */
	uint64_t num_mbuf;		/* mbuf pool size */
	unsigned int num_fids;		/* number of flow ids */
	int queue_type;			/* event queue scheduling type */
	int worker_cycles;		/* busy-work TSC cycles per packet (consumed by work()) */
	int enable_queue_priorities;	/* non-zero: assign priorities to event queues */
	int quiet;			/* non-zero: reduce output -- TODO confirm usage */
	int dump_dev;			/* non-zero: dump eventdev state -- TODO confirm when */
	int dump_dev_signal;		/* one-shot flag: dump eventdev state; cleared in schedule_devices() */
	int all_type_queues;		/* non-zero: use all-types queues -- TODO confirm */
	unsigned int num_stages;	/* pipeline depth, <= MAX_NUM_STAGES */
	unsigned int worker_cq_depth;	/* worker port dequeue (completion) queue depth */
	unsigned int rx_stride;
	/* Use rx stride value to reduce congestion in entry queue when using
	 * multiple eth ports by forming multiple event queue pipelines.
	 */
	int16_t next_qid[MAX_NUM_STAGES+2];	/* next queue id per stage */
	int16_t qid[MAX_NUM_STAGES];		/* queue id per stage */
	uint8_t rx_adapter_id;
	/* Bitmasks selecting which lcores perform each role. */
	uint64_t worker_lcore_mask;
	uint64_t rx_lcore_mask;
	uint64_t tx_lcore_mask;
	uint64_t sched_lcore_mask;
};
96 
/* A (queue, priority) pair -- presumably used when linking event ports
 * to queues; not referenced elsewhere in this header.
 */
struct port_link {
	uint8_t queue_id;
	uint8_t priority;
};
101 
/*
 * Globals shared across the example's translation units.
 *
 * NOTE(review): these are tentative *definitions* in a header, so every
 * .c file including this header defines them; this relies on the linker
 * merging common symbols and breaks with -fno-common (default in newer
 * compilers). Consider declaring them 'extern' here and defining each
 * once in a .c file.
 */
struct cons_data cons_data;

struct fastpath_data *fdata;
struct config_data cdata;
106 
107 static __rte_always_inline void
108 exchange_mac(struct rte_mbuf *m)
109 {
110 	struct ether_hdr *eth;
111 	struct ether_addr addr;
112 
113 	/* change mac addresses on packet (to use mbuf data) */
114 	eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
115 	ether_addr_copy(&eth->d_addr, &addr);
116 	ether_addr_copy(&addr, &eth->d_addr);
117 }
118 
119 static __rte_always_inline void
120 work(void)
121 {
122 	/* do a number of cycles of work per packet */
123 	volatile uint64_t start_tsc = rte_rdtsc();
124 	while (rte_rdtsc() < start_tsc + cdata.worker_cycles)
125 		rte_pause();
126 }
127 
/*
 * Run the software services this lcore is responsible for: the ethdev
 * RX adapter service, the eventdev scheduler service, and the TX
 * consumer callback. Intended to be called from each lcore's main loop.
 */
static __rte_always_inline void
schedule_devices(unsigned int lcore_id)
{
	/* RX adapter: the second argument requests atomic serialization
	 * when more than one lcore may run the service (!rx_single). */
	if (fdata->rx_core[lcore_id]) {
		rte_service_run_iter_on_app_lcore(fdata->rxadptr_service_id,
				!fdata->rx_single);
	}

	if (fdata->sched_core[lcore_id]) {
		rte_service_run_iter_on_app_lcore(fdata->evdev_service_id,
				!fdata->sched_single);
		/* One-shot dump of eventdev state; flag is set elsewhere
		 * (presumably by a signal handler -- TODO confirm). */
		if (cdata.dump_dev_signal) {
			rte_event_dev_dump(0, stdout);
			cdata.dump_dev_signal = 0;
		}
	}

	/* TX: when a single lcore owns TX, run the consumer directly;
	 * otherwise take tx_lock via compare-and-set so that only one
	 * lcore runs the consumer at a time, and clear it afterwards. */
	if (fdata->tx_core[lcore_id] && (fdata->tx_single ||
			 rte_atomic32_cmpset(&(fdata->tx_lock), 0, 1))) {
		fdata->cap.consumer();
		rte_atomic32_clear((rte_atomic32_t *)&(fdata->tx_lock));
	}
}
151 
/* Install the callbacks for the generic-worker pipeline variant into
 * 'caps'; 'burst' selects the burst-mode implementations. Defined in a
 * sibling .c file. */
void set_worker_generic_setup_data(struct setup_data *caps, bool burst);
/* Same as above, for the variant where workers transmit directly. */
void set_worker_tx_setup_data(struct setup_data *caps, bool burst);
154