1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2015 Intel Corporation
3 */
4
5 #include <stdalign.h>
6 #include <stdint.h>
7 #include <stdlib.h>
8 #include <inttypes.h>
9 #include <getopt.h>
10 #include <rte_eal.h>
11 #include <rte_ethdev.h>
12 #include <rte_cycles.h>
13 #include <rte_lcore.h>
14 #include <rte_mbuf.h>
15 #include <rte_mbuf_dyn.h>
16
17 #define RX_RING_SIZE 1024
18 #define TX_RING_SIZE 1024
19
20 #define NUM_MBUFS 8191
21 #define MBUF_CACHE_SIZE 250
22 #define BURST_SIZE 32
23
/* Offset of the hardware RX timestamp dynamic field within each mbuf;
 * stays -1 until registered in port_init() when -t is given.
 */
static int hwts_dynfield_offset = -1;

/* Return a pointer to the HW RX timestamp stored in @mbuf's dynamic field. */
static inline rte_mbuf_timestamp_t *
hwts_field(struct rte_mbuf *mbuf)
{
	return RTE_MBUF_DYNFIELD(mbuf,
			hwts_dynfield_offset, rte_mbuf_timestamp_t *);
}
32
/* Software timestamp (TSC value) recorded per packet at RX time. */
typedef uint64_t tsc_t;
/* Offset of the TSC dynamic field; -1 until registered in main(). */
static int tsc_dynfield_offset = -1;

/* Return a pointer to the software (TSC) timestamp of @mbuf. */
static inline tsc_t *
tsc_field(struct rte_mbuf *mbuf)
{
	return RTE_MBUF_DYNFIELD(mbuf, tsc_dynfield_offset, tsc_t *);
}
41
/* printf-style usage string; %s is the program name (argv[0]). */
static const char usage[] =
	"%s EAL_ARGS -- [-t]\n";

/* Running latency accumulators, updated from the TX callback. */
static struct {
	uint64_t total_cycles;       /* sum of per-packet TSC deltas */
	uint64_t total_queue_cycles; /* sum of HW deltas, converted to cycles */
	uint64_t total_pkts;         /* packets accounted so far */
} latency_numbers;

/* Non-zero when the -t option requests hardware RX timestamping. */
int hw_timestamping;

/* Fixed-point shift used for the device-tick -> TSC-cycle conversion. */
#define TICKS_PER_CYCLE_SHIFT 16
/* (cycles per device tick) << TICKS_PER_CYCLE_SHIFT, measured once in
 * port_init(); zero until calibrated.
 */
static uint64_t ticks_per_cycle_mult;
55
56 /* Callback added to the RX port and applied to packets. 8< */
57 static uint16_t
add_timestamps(uint16_t port __rte_unused,uint16_t qidx __rte_unused,struct rte_mbuf ** pkts,uint16_t nb_pkts,uint16_t max_pkts __rte_unused,void * _ __rte_unused)58 add_timestamps(uint16_t port __rte_unused, uint16_t qidx __rte_unused,
59 struct rte_mbuf **pkts, uint16_t nb_pkts,
60 uint16_t max_pkts __rte_unused, void *_ __rte_unused)
61 {
62 unsigned i;
63 uint64_t now = rte_rdtsc();
64
65 for (i = 0; i < nb_pkts; i++)
66 *tsc_field(pkts[i]) = now;
67 return nb_pkts;
68 }
69 /* >8 End of callback addition and application. */
70
71 /* Callback is added to the TX port. 8< */
72 static uint16_t
calc_latency(uint16_t port,uint16_t qidx __rte_unused,struct rte_mbuf ** pkts,uint16_t nb_pkts,void * _ __rte_unused)73 calc_latency(uint16_t port, uint16_t qidx __rte_unused,
74 struct rte_mbuf **pkts, uint16_t nb_pkts, void *_ __rte_unused)
75 {
76 uint64_t cycles = 0;
77 uint64_t queue_ticks = 0;
78 uint64_t now = rte_rdtsc();
79 uint64_t ticks;
80 unsigned i;
81
82 if (hw_timestamping)
83 rte_eth_read_clock(port, &ticks);
84
85 for (i = 0; i < nb_pkts; i++) {
86 cycles += now - *tsc_field(pkts[i]);
87 if (hw_timestamping)
88 queue_ticks += ticks - *hwts_field(pkts[i]);
89 }
90
91 latency_numbers.total_cycles += cycles;
92 if (hw_timestamping)
93 latency_numbers.total_queue_cycles += (queue_ticks
94 * ticks_per_cycle_mult) >> TICKS_PER_CYCLE_SHIFT;
95
96 latency_numbers.total_pkts += nb_pkts;
97
98 if (latency_numbers.total_pkts > (100 * 1000 * 1000ULL)) {
99 printf("Latency = %"PRIu64" cycles\n",
100 latency_numbers.total_cycles / latency_numbers.total_pkts);
101 if (hw_timestamping) {
102 printf("Latency from HW = %"PRIu64" cycles\n",
103 latency_numbers.total_queue_cycles
104 / latency_numbers.total_pkts);
105 }
106 latency_numbers.total_cycles = 0;
107 latency_numbers.total_queue_cycles = 0;
108 latency_numbers.total_pkts = 0;
109 }
110 return nb_pkts;
111 }
112 /* >8 End of callback addition. */
113
114 /*
115 * Initialises a given port using global settings and with the rx buffers
116 * coming from the mbuf_pool passed as parameter
117 */
118
119 /* Port initialization. 8< */
120 static inline int
port_init(uint16_t port,struct rte_mempool * mbuf_pool)121 port_init(uint16_t port, struct rte_mempool *mbuf_pool)
122 {
123 struct rte_eth_conf port_conf;
124 const uint16_t rx_rings = 1, tx_rings = 1;
125 uint16_t nb_rxd = RX_RING_SIZE;
126 uint16_t nb_txd = TX_RING_SIZE;
127 int retval;
128 uint16_t q;
129 struct rte_eth_dev_info dev_info;
130 struct rte_eth_rxconf rxconf;
131 struct rte_eth_txconf txconf;
132
133 if (!rte_eth_dev_is_valid_port(port))
134 return -1;
135
136 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
137
138 retval = rte_eth_dev_info_get(port, &dev_info);
139 if (retval != 0) {
140 printf("Error during getting device (port %u) info: %s\n",
141 port, strerror(-retval));
142
143 return retval;
144 }
145
146 if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
147 port_conf.txmode.offloads |=
148 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
149
150 if (hw_timestamping) {
151 if (!(dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) {
152 printf("\nERROR: Port %u does not support hardware timestamping\n"
153 , port);
154 return -1;
155 }
156 port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
157 rte_mbuf_dyn_rx_timestamp_register(&hwts_dynfield_offset, NULL);
158 if (hwts_dynfield_offset < 0) {
159 printf("ERROR: Failed to register timestamp field\n");
160 return -rte_errno;
161 }
162 }
163
164 retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
165 if (retval != 0)
166 return retval;
167
168 retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
169 if (retval != 0)
170 return retval;
171
172 rxconf = dev_info.default_rxconf;
173
174 for (q = 0; q < rx_rings; q++) {
175 retval = rte_eth_rx_queue_setup(port, q, nb_rxd,
176 rte_eth_dev_socket_id(port), &rxconf, mbuf_pool);
177 if (retval < 0)
178 return retval;
179 }
180
181 txconf = dev_info.default_txconf;
182 txconf.offloads = port_conf.txmode.offloads;
183 for (q = 0; q < tx_rings; q++) {
184 retval = rte_eth_tx_queue_setup(port, q, nb_txd,
185 rte_eth_dev_socket_id(port), &txconf);
186 if (retval < 0)
187 return retval;
188 }
189
190 retval = rte_eth_dev_start(port);
191 if (retval < 0)
192 return retval;
193
194 if (hw_timestamping && ticks_per_cycle_mult == 0) {
195 uint64_t cycles_base = rte_rdtsc();
196 uint64_t ticks_base;
197 retval = rte_eth_read_clock(port, &ticks_base);
198 if (retval != 0)
199 return retval;
200 rte_delay_ms(100);
201 uint64_t cycles = rte_rdtsc();
202 uint64_t ticks;
203 rte_eth_read_clock(port, &ticks);
204 uint64_t c_freq = cycles - cycles_base;
205 uint64_t t_freq = ticks - ticks_base;
206 double freq_mult = (double)c_freq / t_freq;
207 printf("TSC Freq ~= %" PRIu64
208 "\nHW Freq ~= %" PRIu64
209 "\nRatio : %f\n",
210 c_freq * 10, t_freq * 10, freq_mult);
211 /* TSC will be faster than internal ticks so freq_mult is > 0
212 * We convert the multiplication to an integer shift & mult
213 */
214 ticks_per_cycle_mult = (1 << TICKS_PER_CYCLE_SHIFT) / freq_mult;
215 }
216
217 struct rte_ether_addr addr;
218
219 retval = rte_eth_macaddr_get(port, &addr);
220 if (retval < 0) {
221 printf("Failed to get MAC address on port %u: %s\n",
222 port, rte_strerror(-retval));
223 return retval;
224 }
225 printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
226 " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
227 (unsigned)port,
228 RTE_ETHER_ADDR_BYTES(&addr));
229
230 retval = rte_eth_promiscuous_enable(port);
231 if (retval != 0)
232 return retval;
233
234 /* RX and TX callbacks are added to the ports. 8< */
235 rte_eth_add_rx_callback(port, 0, add_timestamps, NULL);
236 rte_eth_add_tx_callback(port, 0, calc_latency, NULL);
237 /* >8 End of RX and TX callbacks. */
238
239 return 0;
240 }
241 /* >8 End of port initialization. */
242
243 /*
244 * Main thread that does the work, reading from INPUT_PORT
245 * and writing to OUTPUT_PORT
246 */
247 static __rte_noreturn void
lcore_main(void)248 lcore_main(void)
249 {
250 uint16_t port;
251
252 RTE_ETH_FOREACH_DEV(port)
253 if (rte_eth_dev_socket_id(port) >= 0 &&
254 rte_eth_dev_socket_id(port) !=
255 (int)rte_socket_id())
256 printf("WARNING, port %u is on remote NUMA node to "
257 "polling thread.\n\tPerformance will "
258 "not be optimal.\n", port);
259
260 printf("\nCore %u forwarding packets. [Ctrl+C to quit]\n",
261 rte_lcore_id());
262 for (;;) {
263 RTE_ETH_FOREACH_DEV(port) {
264 struct rte_mbuf *bufs[BURST_SIZE];
265 const uint16_t nb_rx = rte_eth_rx_burst(port, 0,
266 bufs, BURST_SIZE);
267 if (unlikely(nb_rx == 0))
268 continue;
269 const uint16_t nb_tx = rte_eth_tx_burst(port ^ 1, 0,
270 bufs, nb_rx);
271 if (unlikely(nb_tx < nb_rx)) {
272 uint16_t buf;
273
274 for (buf = nb_tx; buf < nb_rx; buf++)
275 rte_pktmbuf_free(bufs[buf]);
276 }
277 }
278 }
279 }
280
/* Main function, does initialisation and calls the per-lcore functions */
int
main(int argc, char *argv[])
{
	struct rte_mempool *mbuf_pool;
	uint16_t nb_ports;
	uint16_t portid;
	/* No long options; getopt_long() still needs a terminated table. */
	struct option lgopts[] = {
		{ NULL, 0, 0, 0 }
	};
	int opt, option_index;

	/* Descriptor for the per-mbuf software (TSC) timestamp field.
	 * NOTE(review): the field name looks copy-pasted from the bbdev
	 * example; harmless at runtime, but consider renaming.
	 */
	static const struct rte_mbuf_dynfield tsc_dynfield_desc = {
		.name = "example_bbdev_dynfield_tsc",
		.size = sizeof(tsc_t),
		.align = alignof(tsc_t),
	};

	/* init EAL */
	int ret = rte_eal_init(argc, argv);

	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
	/* Skip the EAL arguments consumed by rte_eal_init(). */
	argc -= ret;
	argv += ret;

	/* Application arguments: -t enables hardware RX timestamping. */
	while ((opt = getopt_long(argc, argv, "t", lgopts, &option_index))
			!= EOF)
		switch (opt) {
		case 't':
			hw_timestamping = 1;
			break;
		default:
			printf(usage, argv[0]);
			return -1;
		}
	optind = 1; /* reset getopt lib */

	/* Ports are forwarded in pairs (port ^ 1), so an even count is
	 * required.
	 */
	nb_ports = rte_eth_dev_count_avail();
	if (nb_ports < 2 || (nb_ports & 1))
		rte_exit(EXIT_FAILURE, "Error: number of ports must be even\n");

	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
		NUM_MBUFS * nb_ports, MBUF_CACHE_SIZE, 0,
		RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

	/* Register the TSC dynfield used by the RX/TX callbacks. */
	tsc_dynfield_offset =
		rte_mbuf_dynfield_register(&tsc_dynfield_desc);
	if (tsc_dynfield_offset < 0)
		rte_exit(EXIT_FAILURE, "Cannot register mbuf field\n");

	/* initialize all ports */
	RTE_ETH_FOREACH_DEV(portid)
		if (port_init(portid, mbuf_pool) != 0)
			rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu16"\n",
					portid);

	if (rte_lcore_count() > 1)
		printf("\nWARNING: Too much enabled lcores - "
			"App uses only 1 lcore\n");

	/* call lcore_main on main core only */
	lcore_main();

	/* clean up the EAL */
	rte_eal_cleanup();

	return 0;
}
352