13998e2a0SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
23998e2a0SBruce Richardson * Copyright(c) 2010-2016 Intel Corporation
3204896f8SPawel Wodkowski */
4204896f8SPawel Wodkowski
5204896f8SPawel Wodkowski #include <locale.h>
6204896f8SPawel Wodkowski #include <stdlib.h>
7204896f8SPawel Wodkowski #include <string.h>
8204896f8SPawel Wodkowski #include <stdint.h>
9204896f8SPawel Wodkowski #include <ctype.h>
10204896f8SPawel Wodkowski #include <getopt.h>
11204896f8SPawel Wodkowski
12204896f8SPawel Wodkowski #include <rte_common.h>
13204896f8SPawel Wodkowski #include <rte_log.h>
14e2366e74STomasz Kulasek #include <rte_malloc.h>
15204896f8SPawel Wodkowski #include <rte_memory.h>
16204896f8SPawel Wodkowski #include <rte_memcpy.h>
17204896f8SPawel Wodkowski #include <rte_eal.h>
18204896f8SPawel Wodkowski #include <rte_launch.h>
19204896f8SPawel Wodkowski #include <rte_cycles.h>
20204896f8SPawel Wodkowski #include <rte_prefetch.h>
21204896f8SPawel Wodkowski #include <rte_lcore.h>
22204896f8SPawel Wodkowski #include <rte_per_lcore.h>
23204896f8SPawel Wodkowski #include <rte_branch_prediction.h>
24204896f8SPawel Wodkowski #include <rte_interrupts.h>
25204896f8SPawel Wodkowski #include <rte_debug.h>
26204896f8SPawel Wodkowski #include <rte_ether.h>
27204896f8SPawel Wodkowski #include <rte_ethdev.h>
28204896f8SPawel Wodkowski #include <rte_mempool.h>
29204896f8SPawel Wodkowski #include <rte_mbuf.h>
30204896f8SPawel Wodkowski #include <rte_spinlock.h>
31204896f8SPawel Wodkowski
32204896f8SPawel Wodkowski #include <rte_errno.h>
33204896f8SPawel Wodkowski #include <rte_jobstats.h>
34204896f8SPawel Wodkowski #include <rte_timer.h>
35204896f8SPawel Wodkowski #include <rte_alarm.h>
36577329e6SJerin Jacob #include <rte_pause.h>
37204896f8SPawel Wodkowski
#define RTE_LOGTYPE_L2FWD RTE_LOGTYPE_USER1

/* Number of mbufs in the packet mempool. */
#define NB_MBUF 8192

/* Maximum number of packets handled per rte_eth_rx_burst() call. */
#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

/*
 * Configurable number of RX/TX ring descriptors
 */
#define RX_DESC_DEFAULT 1024
#define TX_DESC_DEFAULT 1024
static uint16_t nb_rxd = RX_DESC_DEFAULT;
static uint16_t nb_txd = TX_DESC_DEFAULT;

/* ethernet addresses of ports */
static struct rte_ether_addr l2fwd_ports_eth_addr[RTE_MAX_ETHPORTS];

/* mask of enabled ports */
static uint32_t l2fwd_enabled_port_mask;

/* list of enabled ports; indexed by RX port, value is the TX (dst) port */
static uint32_t l2fwd_dst_ports[RTE_MAX_ETHPORTS];

/* Steps (in cycles) by which l2fwd_job_update_cb() grows/shrinks a job
 * period when the job result deviates from its target. */
#define UPDATE_STEP_UP 1
#define UPDATE_STEP_DOWN 32

/* Number of RX queues (= ports) polled per lcore; set by the -q option. */
static unsigned int l2fwd_rx_queue_per_lcore = 1;

#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
699a212dc0SConor Fogarty /* List of queues to be polled for given lcore. 8< */
struct __rte_cache_aligned lcore_queue_conf {
	/* Number of valid entries in rx_port_list. */
	unsigned n_rx_port;
	/* Ports whose RX queue 0 this lcore polls. */
	unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE];
	/* Per-port TSC deadline for the next TX buffer flush. */
	uint64_t next_flush_time[RTE_MAX_ETHPORTS];

	/* One periodic forwarding timer + jobstats entry per polled port. */
	struct rte_timer rx_timers[MAX_RX_QUEUE_PER_LCORE];
	struct rte_jobstats port_fwd_jobs[MAX_RX_QUEUE_PER_LCORE];

	/* Periodic TX-drain job and idle-time accounting for this lcore. */
	struct rte_timer flush_timer;
	struct rte_jobstats flush_job;
	struct rte_jobstats idle_job;
	struct rte_jobstats_context jobs_context;

	/* Set by the stats reader to ask the forwarding lcore to drop 'lock'. */
	RTE_ATOMIC(uint16_t) stats_read_pending;
	/* Serializes jobstats access between forwarding lcore and stats reader. */
	rte_spinlock_t lock;
};
/* >8 End of list of queues to be polled for given lcore. */
struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
88204896f8SPawel Wodkowski
/* Per-port TX buffer used to batch packets before transmission. */
struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];

/* Port configuration: plain TX mode, no multi-queue. */
static struct rte_eth_conf port_conf = {
	.txmode = {
		.mq_mode = RTE_ETH_MQ_TX_NONE,
	},
};

/* Mbuf mempool shared by all enabled ports. */
struct rte_mempool *l2fwd_pktmbuf_pool = NULL;
98204896f8SPawel Wodkowski
/* Per-port statistics struct */
struct __rte_cache_aligned l2fwd_port_statistics {
	uint64_t tx;      /* packets successfully transmitted */
	uint64_t rx;      /* packets received */
	uint64_t dropped; /* packets dropped on TX */
};
struct l2fwd_port_statistics port_statistics[RTE_MAX_ETHPORTS];

/* 1 day max */
#define MAX_TIMER_PERIOD 86400
/* default period is 10 seconds */
static int64_t timer_period = 10;
/* default timer frequency (TSC Hz, cached for cycles_to_ns()) */
static double hz;
/* BURST_TX_DRAIN_US converted to cycles */
uint64_t drain_tsc;
115204896f8SPawel Wodkowski /* Convert cycles to ns */
116204896f8SPawel Wodkowski static inline double
cycles_to_ns(uint64_t cycles)117204896f8SPawel Wodkowski cycles_to_ns(uint64_t cycles)
118204896f8SPawel Wodkowski {
119204896f8SPawel Wodkowski double t = cycles;
120204896f8SPawel Wodkowski
121204896f8SPawel Wodkowski t *= (double)NS_PER_S;
122204896f8SPawel Wodkowski t /= hz;
123204896f8SPawel Wodkowski return t;
124204896f8SPawel Wodkowski }
125204896f8SPawel Wodkowski
/*
 * Snapshot, print and reset the jobstats of one forwarding lcore.
 *
 * Handshake with l2fwd_main_loop(): set stats_read_pending so the
 * forwarding lcore releases qconf->lock, take the lock, copy and reset
 * all statistics, then release the lock.  Values are copied out under
 * the lock and printed afterwards to keep the critical section short.
 */
static void
show_lcore_stats(unsigned lcore_id)
{
	struct lcore_queue_conf *qconf = &lcore_queue_conf[lcore_id];
	struct rte_jobstats_context *ctx = &qconf->jobs_context;
	struct rte_jobstats *job;
	uint8_t i;

	/* LCore statistics. */
	uint64_t stats_period, loop_count;
	uint64_t exec, exec_min, exec_max;
	uint64_t management, management_min, management_max;
	uint64_t busy, busy_min, busy_max;

	/* Jobs statistics (VLAs sized by the number of polled ports). */
	const uint16_t port_cnt = qconf->n_rx_port;
	uint64_t jobs_exec_cnt[port_cnt], jobs_period[port_cnt];
	uint64_t jobs_exec[port_cnt], jobs_exec_min[port_cnt],
		jobs_exec_max[port_cnt];

	uint64_t flush_exec_cnt, flush_period;
	uint64_t flush_exec, flush_exec_min, flush_exec_max;

	uint64_t idle_exec_cnt;
	uint64_t idle_exec, idle_exec_min, idle_exec_max;
	uint64_t collection_time = rte_get_timer_cycles();

	/* Ask forwarding thread to give us stats. */
	rte_atomic_store_explicit(&qconf->stats_read_pending, 1, rte_memory_order_relaxed);
	rte_spinlock_lock(&qconf->lock);
	rte_atomic_store_explicit(&qconf->stats_read_pending, 0, rte_memory_order_relaxed);

	/* Collect context statistics. */
	stats_period = ctx->state_time - ctx->start_time;
	loop_count = ctx->loop_cnt;

	exec = ctx->exec_time;
	exec_min = ctx->min_exec_time;
	exec_max = ctx->max_exec_time;

	management = ctx->management_time;
	management_min = ctx->min_management_time;
	management_max = ctx->max_management_time;

	rte_jobstats_context_reset(ctx);

	/* Copy out then reset each per-port forwarding job. */
	for (i = 0; i < port_cnt; i++) {
		job = &qconf->port_fwd_jobs[i];

		jobs_exec_cnt[i] = job->exec_cnt;
		jobs_period[i] = job->period;

		jobs_exec[i] = job->exec_time;
		jobs_exec_min[i] = job->min_exec_time;
		jobs_exec_max[i] = job->max_exec_time;

		rte_jobstats_reset(job);
	}

	flush_exec_cnt = qconf->flush_job.exec_cnt;
	flush_period = qconf->flush_job.period;
	flush_exec = qconf->flush_job.exec_time;
	flush_exec_min = qconf->flush_job.min_exec_time;
	flush_exec_max = qconf->flush_job.max_exec_time;
	rte_jobstats_reset(&qconf->flush_job);

	idle_exec_cnt = qconf->idle_job.exec_cnt;
	idle_exec = qconf->idle_job.exec_time;
	idle_exec_min = qconf->idle_job.min_exec_time;
	idle_exec_max = qconf->idle_job.max_exec_time;
	rte_jobstats_reset(&qconf->idle_job);

	rte_spinlock_unlock(&qconf->lock);

	/* Exec time includes the idle job; subtract it to report real work. */
	exec -= idle_exec;
	busy = exec + management;
	busy_min = exec_min + management_min;
	busy_max = exec_max + management_max;


	collection_time = rte_get_timer_cycles() - collection_time;

#define STAT_FMT "\n%-18s %'14.0f %6.1f%% %'10.0f %'10.0f %'10.0f"

	printf("\n----------------"
		"\nLCore %3u: statistics (time in ns, collected in %'9.0f)"
		"\n%-18s %14s %7s %10s %10s %10s "
		"\n%-18s %'14.0f"
		"\n%-18s %'14" PRIu64
		STAT_FMT /* Exec */
		STAT_FMT /* Management */
		STAT_FMT /* Busy */
		STAT_FMT, /* Idle */
		lcore_id, cycles_to_ns(collection_time),
		"Stat type", "total", "%total", "avg", "min", "max",
		"Stats duration:", cycles_to_ns(stats_period),
		"Loop count:", loop_count,
		"Exec time",
		cycles_to_ns(exec), exec * 100.0 / stats_period,
		cycles_to_ns(loop_count  ? exec / loop_count : 0),
		cycles_to_ns(exec_min),
		cycles_to_ns(exec_max),
		"Management time",
		cycles_to_ns(management), management * 100.0 / stats_period,
		cycles_to_ns(loop_count  ? management / loop_count : 0),
		cycles_to_ns(management_min),
		cycles_to_ns(management_max),
		"Exec + management",
		cycles_to_ns(busy),  busy * 100.0 / stats_period,
		cycles_to_ns(loop_count ? busy / loop_count : 0),
		cycles_to_ns(busy_min),
		cycles_to_ns(busy_max),
		"Idle (job)",
		cycles_to_ns(idle_exec), idle_exec * 100.0 / stats_period,
		cycles_to_ns(idle_exec_cnt ? idle_exec / idle_exec_cnt : 0),
		cycles_to_ns(idle_exec_min),
		cycles_to_ns(idle_exec_max));

	for (i = 0; i < qconf->n_rx_port; i++) {
		job = &qconf->port_fwd_jobs[i];
		printf("\n\nJob %" PRIu32 ": %-20s "
			"\n%-18s %'14" PRIu64
			"\n%-18s %'14.0f"
			STAT_FMT,
			i, job->name,
			"Exec count:", jobs_exec_cnt[i],
			"Exec period: ", cycles_to_ns(jobs_period[i]),
			"Exec time",
			cycles_to_ns(jobs_exec[i]), jobs_exec[i] * 100.0 / stats_period,
			cycles_to_ns(jobs_exec_cnt[i] ? jobs_exec[i] / jobs_exec_cnt[i]
				: 0),
			cycles_to_ns(jobs_exec_min[i]),
			cycles_to_ns(jobs_exec_max[i]));
	}

	if (qconf->n_rx_port > 0) {
		job = &qconf->flush_job;
		/* After the loop above 'i' == n_rx_port, so the flush job is
		 * printed as the next job number after the forwarding jobs. */
		printf("\n\nJob %" PRIu32 ": %-20s "
			"\n%-18s %'14" PRIu64
			"\n%-18s %'14.0f"
			STAT_FMT,
			i, job->name,
			"Exec count:", flush_exec_cnt,
			"Exec period: ", cycles_to_ns(flush_period),
			"Exec time",
			cycles_to_ns(flush_exec), flush_exec * 100.0 / stats_period,
			cycles_to_ns(flush_exec_cnt ? flush_exec / flush_exec_cnt : 0),
			cycles_to_ns(flush_exec_min),
			cycles_to_ns(flush_exec_max));
	}
}
277204896f8SPawel Wodkowski
278204896f8SPawel Wodkowski /* Print out statistics on packets dropped */
279204896f8SPawel Wodkowski static void
show_stats_cb(__rte_unused void * param)280204896f8SPawel Wodkowski show_stats_cb(__rte_unused void *param)
281204896f8SPawel Wodkowski {
282204896f8SPawel Wodkowski uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
283204896f8SPawel Wodkowski unsigned portid, lcore_id;
284204896f8SPawel Wodkowski
285204896f8SPawel Wodkowski total_packets_dropped = 0;
286204896f8SPawel Wodkowski total_packets_tx = 0;
287204896f8SPawel Wodkowski total_packets_rx = 0;
288204896f8SPawel Wodkowski
289204896f8SPawel Wodkowski const char clr[] = { 27, '[', '2', 'J', '\0' };
290204896f8SPawel Wodkowski const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' };
291204896f8SPawel Wodkowski
292204896f8SPawel Wodkowski /* Clear screen and move to top left */
293204896f8SPawel Wodkowski printf("%s%s"
294204896f8SPawel Wodkowski "\nPort statistics ===================================",
295204896f8SPawel Wodkowski clr, topLeft);
296204896f8SPawel Wodkowski
297204896f8SPawel Wodkowski for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
298204896f8SPawel Wodkowski /* skip disabled ports */
299204896f8SPawel Wodkowski if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
300204896f8SPawel Wodkowski continue;
301204896f8SPawel Wodkowski printf("\nStatistics for port %u ------------------------------"
302204896f8SPawel Wodkowski "\nPackets sent: %24"PRIu64
303204896f8SPawel Wodkowski "\nPackets received: %20"PRIu64
304204896f8SPawel Wodkowski "\nPackets dropped: %21"PRIu64,
305204896f8SPawel Wodkowski portid,
306204896f8SPawel Wodkowski port_statistics[portid].tx,
307204896f8SPawel Wodkowski port_statistics[portid].rx,
308204896f8SPawel Wodkowski port_statistics[portid].dropped);
309204896f8SPawel Wodkowski
310204896f8SPawel Wodkowski total_packets_dropped += port_statistics[portid].dropped;
311204896f8SPawel Wodkowski total_packets_tx += port_statistics[portid].tx;
312204896f8SPawel Wodkowski total_packets_rx += port_statistics[portid].rx;
313204896f8SPawel Wodkowski }
314204896f8SPawel Wodkowski
315204896f8SPawel Wodkowski printf("\nAggregate statistics ==============================="
316204896f8SPawel Wodkowski "\nTotal packets sent: %18"PRIu64
317204896f8SPawel Wodkowski "\nTotal packets received: %14"PRIu64
318204896f8SPawel Wodkowski "\nTotal packets dropped: %15"PRIu64
319204896f8SPawel Wodkowski "\n====================================================",
320204896f8SPawel Wodkowski total_packets_tx,
321204896f8SPawel Wodkowski total_packets_rx,
322204896f8SPawel Wodkowski total_packets_dropped);
323204896f8SPawel Wodkowski
324204896f8SPawel Wodkowski RTE_LCORE_FOREACH(lcore_id) {
325204896f8SPawel Wodkowski if (lcore_queue_conf[lcore_id].n_rx_port > 0)
326204896f8SPawel Wodkowski show_lcore_stats(lcore_id);
327204896f8SPawel Wodkowski }
328204896f8SPawel Wodkowski
329204896f8SPawel Wodkowski printf("\n====================================================\n");
3303ee6f706SGeorgiy Levashov
3313ee6f706SGeorgiy Levashov fflush(stdout);
3323ee6f706SGeorgiy Levashov
333204896f8SPawel Wodkowski rte_eal_alarm_set(timer_period * US_PER_S, show_stats_cb, NULL);
334204896f8SPawel Wodkowski }
335204896f8SPawel Wodkowski
3369a212dc0SConor Fogarty /* Start of l2fwd_simple_forward. 8< */
/*
 * Forward one mbuf: rewrite source/destination MACs for the paired
 * destination port and queue the packet into that port's TX buffer.
 */
static void
l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid)
{
	struct rte_ether_hdr *eth;
	void *tmp;
	int sent;
	unsigned dst_port;
	struct rte_eth_dev_tx_buffer *buffer;

	dst_port = l2fwd_dst_ports[portid];
	eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

	/* 02:00:00:00:00:xx */
	tmp = &eth->dst_addr.addr_bytes[0];
	/* Single 8-byte store writing the 6-byte destination MAC.
	 * NOTE(review): this also clobbers the first two bytes of src_addr,
	 * which are overwritten by the copy just below -- this relies on the
	 * dst_addr/src_addr field layout and on unaligned stores being
	 * acceptable on the target architecture; confirm if either changes. */
	*((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40);

	/* src addr */
	rte_ether_addr_copy(&l2fwd_ports_eth_addr[dst_port], &eth->src_addr);

	buffer = tx_buffer[dst_port];
	/* rte_eth_tx_buffer() returns the number of packets actually sent
	 * when the buffer filled up and was flushed; 0 means only queued. */
	sent = rte_eth_tx_buffer(dst_port, 0, buffer, m);
	if (sent)
		port_statistics[dst_port].tx += sent;
}
3619a212dc0SConor Fogarty /* >8 End of l2fwd_simple_forward. */
362204896f8SPawel Wodkowski
363204896f8SPawel Wodkowski static void
l2fwd_job_update_cb(struct rte_jobstats * job,int64_t result)364204896f8SPawel Wodkowski l2fwd_job_update_cb(struct rte_jobstats *job, int64_t result)
365204896f8SPawel Wodkowski {
366204896f8SPawel Wodkowski int64_t err = job->target - result;
367204896f8SPawel Wodkowski int64_t histeresis = job->target / 8;
368204896f8SPawel Wodkowski
369204896f8SPawel Wodkowski if (err < -histeresis) {
370204896f8SPawel Wodkowski if (job->min_period + UPDATE_STEP_DOWN < job->period)
371204896f8SPawel Wodkowski job->period -= UPDATE_STEP_DOWN;
372204896f8SPawel Wodkowski } else if (err > histeresis) {
373204896f8SPawel Wodkowski if (job->period + UPDATE_STEP_UP < job->max_period)
374204896f8SPawel Wodkowski job->period += UPDATE_STEP_UP;
375204896f8SPawel Wodkowski }
376204896f8SPawel Wodkowski }
377204896f8SPawel Wodkowski
/*
 * Periodic per-port forwarding job.  'arg' carries the index into
 * qconf->rx_port_list identifying which of this lcore's ports to poll.
 */
static void
l2fwd_fwd_job(__rte_unused struct rte_timer *timer, void *arg)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	struct rte_mbuf *m;

	const uint16_t port_idx = (uintptr_t) arg;
	const unsigned lcore_id = rte_lcore_id();
	struct lcore_queue_conf *qconf = &lcore_queue_conf[lcore_id];
	struct rte_jobstats *job = &qconf->port_fwd_jobs[port_idx];
	const uint16_t portid = qconf->rx_port_list[port_idx];

	uint8_t j;
	uint16_t total_nb_rx;

	rte_jobstats_start(&qconf->jobs_context, job);

	/* Call rx burst 2 times. This allow rte_jobstats logic to see if this
	 * function must be called more frequently. */

	/* Call rx burst 2 times. 8< */
	total_nb_rx = rte_eth_rx_burst(portid, 0, pkts_burst,
			MAX_PKT_BURST);

	for (j = 0; j < total_nb_rx; j++) {
		m = pkts_burst[j];
		rte_prefetch0(rte_pktmbuf_mtod(m, void *));
		l2fwd_simple_forward(m, portid);
	}
	/* >8 End of call rx burst 2 times. */

	/* Read second try. 8< */
	if (total_nb_rx == MAX_PKT_BURST) {
		/* First burst was full: poll once more so the job result
		 * reflects the backlog and the period gets shortened. */
		const uint16_t nb_rx = rte_eth_rx_burst(portid, 0, pkts_burst,
				MAX_PKT_BURST);

		total_nb_rx += nb_rx;
		for (j = 0; j < nb_rx; j++) {
			m = pkts_burst[j];
			rte_prefetch0(rte_pktmbuf_mtod(m, void *));
			l2fwd_simple_forward(m, portid);
		}
	}
	/* >8 End of read second try. */

	port_statistics[portid].rx += total_nb_rx;

	/* Adjust period time in which we are running here. 8< */
	/* Non-zero return means the job period was adjusted; re-arm the
	 * periodic timer with the new period. */
	if (rte_jobstats_finish(job, total_nb_rx) != 0) {
		rte_timer_reset(&qconf->rx_timers[port_idx], job->period, PERIODICAL,
			lcore_id, l2fwd_fwd_job, arg);
	}
	/* >8 End of adjust period time in which we are running. */
}
432204896f8SPawel Wodkowski
4339a212dc0SConor Fogarty /* Draining TX queue of each port. 8< */
434204896f8SPawel Wodkowski static void
l2fwd_flush_job(__rte_unused struct rte_timer * timer,__rte_unused void * arg)435204896f8SPawel Wodkowski l2fwd_flush_job(__rte_unused struct rte_timer *timer, __rte_unused void *arg)
436204896f8SPawel Wodkowski {
437204896f8SPawel Wodkowski uint64_t now;
438204896f8SPawel Wodkowski unsigned lcore_id;
439204896f8SPawel Wodkowski struct lcore_queue_conf *qconf;
44047523597SZhiyong Yang uint16_t portid;
441e2366e74STomasz Kulasek unsigned i;
442e2366e74STomasz Kulasek uint32_t sent;
443e2366e74STomasz Kulasek struct rte_eth_dev_tx_buffer *buffer;
444204896f8SPawel Wodkowski
445204896f8SPawel Wodkowski lcore_id = rte_lcore_id();
446204896f8SPawel Wodkowski qconf = &lcore_queue_conf[lcore_id];
447204896f8SPawel Wodkowski
448204896f8SPawel Wodkowski rte_jobstats_start(&qconf->jobs_context, &qconf->flush_job);
449204896f8SPawel Wodkowski
450204896f8SPawel Wodkowski now = rte_get_timer_cycles();
451204896f8SPawel Wodkowski lcore_id = rte_lcore_id();
452204896f8SPawel Wodkowski qconf = &lcore_queue_conf[lcore_id];
453e2366e74STomasz Kulasek
454e2366e74STomasz Kulasek for (i = 0; i < qconf->n_rx_port; i++) {
455e2366e74STomasz Kulasek portid = l2fwd_dst_ports[qconf->rx_port_list[i]];
456e2366e74STomasz Kulasek
457e2366e74STomasz Kulasek if (qconf->next_flush_time[portid] <= now)
458204896f8SPawel Wodkowski continue;
459204896f8SPawel Wodkowski
460e2366e74STomasz Kulasek buffer = tx_buffer[portid];
461e2366e74STomasz Kulasek sent = rte_eth_tx_buffer_flush(portid, 0, buffer);
462e2366e74STomasz Kulasek if (sent)
463e2366e74STomasz Kulasek port_statistics[portid].tx += sent;
464204896f8SPawel Wodkowski
465e2366e74STomasz Kulasek qconf->next_flush_time[portid] = rte_get_timer_cycles() + drain_tsc;
466e2366e74STomasz Kulasek }
467204896f8SPawel Wodkowski
4687be78d02SJosh Soref /* Pass target to indicate that this job is happy of time interval
469204896f8SPawel Wodkowski * in which it was called. */
470204896f8SPawel Wodkowski rte_jobstats_finish(&qconf->flush_job, qconf->flush_job.target);
471204896f8SPawel Wodkowski }
4729a212dc0SConor Fogarty /* >8 End of draining TX queue of each port. */
473204896f8SPawel Wodkowski
/*
 * Forwarding lcore main loop.
 *
 * Spins executing timer-driven jobs (per-port forwarding, TX flush)
 * while holding qconf->lock; releases the lock briefly whenever
 * show_lcore_stats() sets stats_read_pending so the reader can snapshot
 * statistics.  Returns immediately if this lcore has no RX ports.
 */
static void
l2fwd_main_loop(void)
{
	unsigned lcore_id;
	unsigned i, portid;
	struct lcore_queue_conf *qconf;
	uint8_t stats_read_pending = 0;
	uint8_t need_manage;

	lcore_id = rte_lcore_id();
	qconf = &lcore_queue_conf[lcore_id];

	if (qconf->n_rx_port == 0) {
		RTE_LOG(INFO, L2FWD, "lcore %u has nothing to do\n", lcore_id);
		return;
	}

	RTE_LOG(INFO, L2FWD, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < qconf->n_rx_port; i++) {

		portid = qconf->rx_port_list[i];
		RTE_LOG(INFO, L2FWD, " -- lcoreid=%u portid=%u\n", lcore_id,
			portid);
	}

	rte_jobstats_init(&qconf->idle_job, "idle", 0, 0, 0, 0);

	/* Minimize impact of stats reading. 8< */
	for (;;) {
		/* Work with the lock held; the stats reader asks us to drop it
		 * via stats_read_pending. */
		rte_spinlock_lock(&qconf->lock);

		do {
			rte_jobstats_context_start(&qconf->jobs_context);

			/* Do the Idle job:
			 * - Read stats_read_pending flag
			 * - check if some real job need to be executed
			 */
			rte_jobstats_start(&qconf->jobs_context, &qconf->idle_job);

			uint64_t repeats = 0;

			do {
				uint8_t i;
				uint64_t now = rte_get_timer_cycles();

				repeats++;
				need_manage = qconf->flush_timer.expire < now;
				/* Check if we were asked to report statistics. */
				stats_read_pending = rte_atomic_load_explicit(
					&qconf->stats_read_pending,
					rte_memory_order_relaxed);
				need_manage |= stats_read_pending;

				for (i = 0; i < qconf->n_rx_port && !need_manage; i++)
					need_manage = qconf->rx_timers[i].expire < now;

			} while (!need_manage);

			/* A single pass means some timer was already due on entry,
			 * so the idle sample is meaningless -- discard it. */
			if (likely(repeats != 1))
				rte_jobstats_finish(&qconf->idle_job, qconf->idle_job.target);
			else
				rte_jobstats_abort(&qconf->idle_job);

			rte_timer_manage();
			rte_jobstats_context_finish(&qconf->jobs_context);
		} while (likely(stats_read_pending == 0));

		/* Give the stats reader a chance to grab the lock. */
		rte_spinlock_unlock(&qconf->lock);
		rte_pause();
	}
	/* >8 End of minimize impact of stats reading. */
}
549204896f8SPawel Wodkowski
/* Lcore entry point: runs the forwarding loop; returns 0 only when the
 * lcore has no RX ports assigned (l2fwd_main_loop() otherwise spins
 * forever). */
static int
l2fwd_launch_one_lcore(__rte_unused void *dummy)
{
	l2fwd_main_loop();
	return 0;
}
556204896f8SPawel Wodkowski
/* Print command-line usage help for this application to stdout. */
static void
l2fwd_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n"
		"  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
		"  -q NQ: number of queue (=ports) per lcore (default is 1)\n"
		"  -T PERIOD: statistics will be refreshed each PERIOD seconds (0 to disable, 10 default, 86400 maximum)\n"
		"  -l set system default locale instead of default (\"C\" locale) for thousands separator in stats.",
		prgname);
}
568204896f8SPawel Wodkowski
/*
 * Parse the -p PORTMASK argument (hexadecimal).
 *
 * Returns the parsed mask, or 0 when the string is empty or contains
 * anything other than hexadecimal digits.
 */
static int
l2fwd_parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long mask;

	mask = strtoul(portmask, &end, 16);

	/* Reject empty input and trailing garbage. */
	if (portmask[0] == '\0' || end == NULL || *end != '\0')
		return 0;

	return mask;
}
582204896f8SPawel Wodkowski
583204896f8SPawel Wodkowski static unsigned int
l2fwd_parse_nqueue(const char * q_arg)584204896f8SPawel Wodkowski l2fwd_parse_nqueue(const char *q_arg)
585204896f8SPawel Wodkowski {
586204896f8SPawel Wodkowski char *end = NULL;
587204896f8SPawel Wodkowski unsigned long n;
588204896f8SPawel Wodkowski
589204896f8SPawel Wodkowski /* parse hexadecimal string */
590204896f8SPawel Wodkowski n = strtoul(q_arg, &end, 10);
591204896f8SPawel Wodkowski if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
592204896f8SPawel Wodkowski return 0;
593204896f8SPawel Wodkowski if (n == 0)
594204896f8SPawel Wodkowski return 0;
595204896f8SPawel Wodkowski if (n >= MAX_RX_QUEUE_PER_LCORE)
596204896f8SPawel Wodkowski return 0;
597204896f8SPawel Wodkowski
598204896f8SPawel Wodkowski return n;
599204896f8SPawel Wodkowski }
600204896f8SPawel Wodkowski
601204896f8SPawel Wodkowski static int
l2fwd_parse_timer_period(const char * q_arg)602204896f8SPawel Wodkowski l2fwd_parse_timer_period(const char *q_arg)
603204896f8SPawel Wodkowski {
604204896f8SPawel Wodkowski char *end = NULL;
605204896f8SPawel Wodkowski int n;
606204896f8SPawel Wodkowski
607204896f8SPawel Wodkowski /* parse number string */
608204896f8SPawel Wodkowski n = strtol(q_arg, &end, 10);
609204896f8SPawel Wodkowski if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
610204896f8SPawel Wodkowski return -1;
611204896f8SPawel Wodkowski if (n >= MAX_TIMER_PERIOD)
612204896f8SPawel Wodkowski return -1;
613204896f8SPawel Wodkowski
614204896f8SPawel Wodkowski return n;
615204896f8SPawel Wodkowski }
616204896f8SPawel Wodkowski
617204896f8SPawel Wodkowski /* Parse the argument given in the command line of the application */
618204896f8SPawel Wodkowski static int
l2fwd_parse_args(int argc,char ** argv)619204896f8SPawel Wodkowski l2fwd_parse_args(int argc, char **argv)
620204896f8SPawel Wodkowski {
621204896f8SPawel Wodkowski int opt, ret;
622204896f8SPawel Wodkowski char **argvopt;
623204896f8SPawel Wodkowski int option_index;
624204896f8SPawel Wodkowski char *prgname = argv[0];
625204896f8SPawel Wodkowski static struct option lgopts[] = {
626204896f8SPawel Wodkowski {NULL, 0, 0, 0}
627204896f8SPawel Wodkowski };
628204896f8SPawel Wodkowski
629204896f8SPawel Wodkowski argvopt = argv;
630204896f8SPawel Wodkowski
631204896f8SPawel Wodkowski while ((opt = getopt_long(argc, argvopt, "p:q:T:l",
632204896f8SPawel Wodkowski lgopts, &option_index)) != EOF) {
633204896f8SPawel Wodkowski
634204896f8SPawel Wodkowski switch (opt) {
635204896f8SPawel Wodkowski /* portmask */
636204896f8SPawel Wodkowski case 'p':
637204896f8SPawel Wodkowski l2fwd_enabled_port_mask = l2fwd_parse_portmask(optarg);
638204896f8SPawel Wodkowski if (l2fwd_enabled_port_mask == 0) {
639204896f8SPawel Wodkowski printf("invalid portmask\n");
640204896f8SPawel Wodkowski l2fwd_usage(prgname);
641204896f8SPawel Wodkowski return -1;
642204896f8SPawel Wodkowski }
643204896f8SPawel Wodkowski break;
644204896f8SPawel Wodkowski
645204896f8SPawel Wodkowski /* nqueue */
646204896f8SPawel Wodkowski case 'q':
647204896f8SPawel Wodkowski l2fwd_rx_queue_per_lcore = l2fwd_parse_nqueue(optarg);
648204896f8SPawel Wodkowski if (l2fwd_rx_queue_per_lcore == 0) {
649204896f8SPawel Wodkowski printf("invalid queue number\n");
650204896f8SPawel Wodkowski l2fwd_usage(prgname);
651204896f8SPawel Wodkowski return -1;
652204896f8SPawel Wodkowski }
653204896f8SPawel Wodkowski break;
654204896f8SPawel Wodkowski
655204896f8SPawel Wodkowski /* timer period */
656204896f8SPawel Wodkowski case 'T':
657204896f8SPawel Wodkowski timer_period = l2fwd_parse_timer_period(optarg);
658204896f8SPawel Wodkowski if (timer_period < 0) {
659204896f8SPawel Wodkowski printf("invalid timer period\n");
660204896f8SPawel Wodkowski l2fwd_usage(prgname);
661204896f8SPawel Wodkowski return -1;
662204896f8SPawel Wodkowski }
663204896f8SPawel Wodkowski break;
664204896f8SPawel Wodkowski
665204896f8SPawel Wodkowski /* For thousands separator in printf. */
666204896f8SPawel Wodkowski case 'l':
667204896f8SPawel Wodkowski setlocale(LC_ALL, "");
668204896f8SPawel Wodkowski break;
669204896f8SPawel Wodkowski
670204896f8SPawel Wodkowski /* long options */
671204896f8SPawel Wodkowski case 0:
672204896f8SPawel Wodkowski l2fwd_usage(prgname);
673204896f8SPawel Wodkowski return -1;
674204896f8SPawel Wodkowski
675204896f8SPawel Wodkowski default:
676204896f8SPawel Wodkowski l2fwd_usage(prgname);
677204896f8SPawel Wodkowski return -1;
678204896f8SPawel Wodkowski }
679204896f8SPawel Wodkowski }
680204896f8SPawel Wodkowski
681204896f8SPawel Wodkowski if (optind >= 0)
682204896f8SPawel Wodkowski argv[optind-1] = prgname;
683204896f8SPawel Wodkowski
684204896f8SPawel Wodkowski ret = optind-1;
6859d5ca532SKeith Wiles optind = 1; /* reset getopt lib */
686204896f8SPawel Wodkowski return ret;
687204896f8SPawel Wodkowski }
688204896f8SPawel Wodkowski
/* Check the link status of all ports in up to 9s, and print them finally.
 *
 * Polls every port selected by port_mask once per CHECK_INTERVAL ms.
 * While any port is still down, prints a progress dot and keeps polling;
 * once every port is up (or the 9s budget is spent) it arms print_flag
 * and makes one final pass that prints each port's link status.
 */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint16_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;
	int ret;
	char link_status_text[RTE_ETH_LINK_MAX_STR_LEN];

	printf("\nChecking link status");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		/* assume success; any down/unreadable port clears this */
		all_ports_up = 1;
		RTE_ETH_FOREACH_DEV(portid) {
			/* only ports selected by the mask are checked */
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			ret = rte_eth_link_get_nowait(portid, &link);
			if (ret < 0) {
				/* query failed: count the port as down; only
				 * report the error on the final (print) pass
				 */
				all_ports_up = 0;
				if (print_flag == 1)
					printf("Port %u link get failed: %s\n",
						portid, rte_strerror(-ret));
				continue;
			}
			/* print link status if flag set (final pass) */
			if (print_flag == 1) {
				rte_eth_link_to_str(link_status_text,
					sizeof(link_status_text), &link);
				printf("Port %d %s\n", portid,
					link_status_text);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == RTE_ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		/* still waiting: show progress and sleep before retrying */
		if (all_ports_up == 0) {
			printf(".");
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout, so the next
		 * iteration becomes the one-shot reporting pass
		 */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
			printf("done\n");
		}
	}
}
748204896f8SPawel Wodkowski
/* Application entry point.
 *
 * Initializes the EAL, parses l2fwd-jobstats options, creates the mbuf
 * pool, pairs up the enabled ports for forwarding, assigns RX ports to
 * lcores, configures and starts each port (one RX + one TX queue plus a
 * TX buffer with a drop-counting error callback), registers the per-lcore
 * jobstats flush/forward timer jobs, then launches the forwarding loop on
 * every lcore and waits for the workers to finish.
 */
int
main(int argc, char **argv)
{
	struct lcore_queue_conf *qconf;
	unsigned lcore_id, rx_lcore_id;
	unsigned nb_ports_in_mask = 0;
	int ret;
	char name[RTE_JOBSTATS_NAMESIZE];
	uint16_t nb_ports;
	uint16_t nb_ports_available = 0;
	uint16_t portid, last_port;
	uint8_t i;

	/* Init EAL. 8< */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
	/* skip past the EAL arguments consumed by rte_eal_init() */
	argc -= ret;
	argv += ret;

	/* parse application arguments (after the EAL ones) */
	ret = l2fwd_parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid L2FWD arguments\n");
	/* >8 End of init EAL. */

	rte_timer_subsystem_init();

	/* fetch default timer frequency. */
	hz = rte_get_timer_hz();

	/* Create the mbuf pool. 8< */
	l2fwd_pktmbuf_pool =
		rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 32,
			0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	if (l2fwd_pktmbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");
	/* >8 End of creation of mbuf pool. */
	nb_ports = rte_eth_dev_count_avail();
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");

	/* Reset l2fwd_dst_ports. 8< */
	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
		l2fwd_dst_ports[portid] = 0;
	last_port = 0;

	/*
	 * Each logical core is assigned a dedicated TX queue on each port.
	 * Enabled ports are paired: each even/odd pair forwards to each
	 * other (the Nth enabled port with the (N+1)th).
	 */
	RTE_ETH_FOREACH_DEV(portid) {
		/* skip ports that are not enabled */
		if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
			continue;

		if (nb_ports_in_mask % 2) {
			/* second port of a pair: link both directions */
			l2fwd_dst_ports[portid] = last_port;
			l2fwd_dst_ports[last_port] = portid;
		} else
			last_port = portid;

		nb_ports_in_mask++;
	}
	/* >8 End of reset l2fwd_dst_ports. */
	if (nb_ports_in_mask % 2) {
		/* an unpaired last port forwards back to itself */
		printf("Notice: odd number of ports in portmask.\n");
		l2fwd_dst_ports[last_port] = last_port;
	}

	rx_lcore_id = 0;
	qconf = NULL;

	/* Initialize the port/queue configuration of each logical core */
	RTE_ETH_FOREACH_DEV(portid) {
		/* skip ports that are not enabled */
		if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
			continue;

		/* get the lcore_id for this port: advance to the next enabled
		 * lcore that still has room for another RX port
		 */
		while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
		       lcore_queue_conf[rx_lcore_id].n_rx_port ==
		       l2fwd_rx_queue_per_lcore) {
			rx_lcore_id++;
			if (rx_lcore_id >= RTE_MAX_LCORE)
				rte_exit(EXIT_FAILURE, "Not enough cores\n");
		}

		if (qconf != &lcore_queue_conf[rx_lcore_id])
			/* Assigned a new logical core in the loop above. */
			qconf = &lcore_queue_conf[rx_lcore_id];

		qconf->rx_port_list[qconf->n_rx_port] = portid;
		qconf->n_rx_port++;
		printf("Lcore %u: RX port %u\n", rx_lcore_id, portid);
	}

	/* Initialise each port */
	RTE_ETH_FOREACH_DEV(portid) {
		struct rte_eth_dev_info dev_info;
		struct rte_eth_rxconf rxq_conf;
		struct rte_eth_txconf txq_conf;
		/* start from the file-scope template and specialize per port */
		struct rte_eth_conf local_port_conf = port_conf;

		/* skip ports that are not enabled */
		if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) {
			printf("Skipping disabled port %u\n", portid);
			continue;
		}
		nb_ports_available++;

		/* init port */
		printf("Initializing port %u... ", portid);
		fflush(stdout);

		ret = rte_eth_dev_info_get(portid, &dev_info);
		if (ret != 0)
			rte_exit(EXIT_FAILURE,
				"Error during getting device (port %u) info: %s\n",
				portid, strerror(-ret));

		/* enable fast mbuf free only if the device supports it */
		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
			local_port_conf.txmode.offloads |=
				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
		/* Configure the RX and TX queues. 8< */
		ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n",
				  ret, portid);
		/* >8 End of configuring the RX and TX queues. */

		/* let the driver round descriptor counts to supported values */
		ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
						       &nb_txd);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				 "Cannot adjust number of descriptors: err=%d, port=%u\n",
				 ret, portid);

		ret = rte_eth_macaddr_get(portid,
					  &l2fwd_ports_eth_addr[portid]);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				 "Cannot get MAC address: err=%d, port=%u\n",
				 ret, portid);

		/* init one RX queue */
		fflush(stdout);
		rxq_conf = dev_info.default_rxconf;
		rxq_conf.offloads = local_port_conf.rxmode.offloads;
		/* RX queue initialization. 8< */
		ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
					     rte_eth_dev_socket_id(portid),
					     &rxq_conf,
					     l2fwd_pktmbuf_pool);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup:err=%d, port=%u\n",
				  ret, portid);
		/* >8 End of RX queue initialization. */

		/* Init one TX queue on each port. 8< */
		txq_conf = dev_info.default_txconf;
		txq_conf.offloads = local_port_conf.txmode.offloads;
		fflush(stdout);
		ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
				rte_eth_dev_socket_id(portid),
				&txq_conf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"rte_eth_tx_queue_setup:err=%d, port=%u\n",
				ret, portid);
		/* >8 End of init one TX queue on each port. */

		/* Initialize TX buffers (allocated on the port's NUMA node) */
		tx_buffer[portid] = rte_zmalloc_socket("tx_buffer",
				RTE_ETH_TX_BUFFER_SIZE(MAX_PKT_BURST), 0,
				rte_eth_dev_socket_id(portid));
		if (tx_buffer[portid] == NULL)
			rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx on port %u\n",
					portid);

		rte_eth_tx_buffer_init(tx_buffer[portid], MAX_PKT_BURST);

		/* count unsent packets into the port's dropped statistic */
		ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[portid],
				rte_eth_tx_buffer_count_callback,
				&port_statistics[portid].dropped);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
			"Cannot set error callback for tx buffer on port %u\n",
				 portid);

		/* Start device */
		ret = rte_eth_dev_start(portid);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_dev_start:err=%d, port=%u\n",
				  ret, portid);

		printf("done:\n");

		ret = rte_eth_promiscuous_enable(portid);
		if (ret != 0) {
			rte_exit(EXIT_FAILURE,
				 "rte_eth_promiscuous_enable:err=%s, port=%u\n",
				 rte_strerror(-ret), portid);
			/* NOTE(review): unreachable — rte_exit() terminates
			 * the process and does not return
			 */
			return ret;

		}

		printf("Port %u, MAC address: " RTE_ETHER_ADDR_PRT_FMT "\n\n",
			portid,
			RTE_ETHER_ADDR_BYTES(&l2fwd_ports_eth_addr[portid]));

		/* initialize port stats (whole array, re-zeroed per port) */
		memset(&port_statistics, 0, sizeof(port_statistics));
	}

	if (!nb_ports_available) {
		rte_exit(EXIT_FAILURE,
			"All available ports are disabled. Please set portmask.\n");
	}

	check_all_ports_link_status(l2fwd_enabled_port_mask);

	/* TSC ticks between TX-buffer drains, rounded up */
	drain_tsc = (hz + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;

	RTE_LCORE_FOREACH(lcore_id) {
		qconf = &lcore_queue_conf[lcore_id];

		rte_spinlock_init(&qconf->lock);

		if (rte_jobstats_context_init(&qconf->jobs_context) != 0)
			rte_panic("Jobs stats context for core %u init failed\n", lcore_id);

		/* lcores with no RX ports get no timer jobs */
		if (qconf->n_rx_port == 0) {
			RTE_LOG(INFO, L2FWD,
				"lcore %u: no ports so no jobs stats context initialization\n",
				lcore_id);
			continue;
		}
		/* Add flush job. 8< */

		/* Set fixed period by setting min = max = initial period. Set target to
		 * zero as it is irrelevant for this job.
		 */
		rte_jobstats_init(&qconf->flush_job, "flush", drain_tsc, drain_tsc,
				drain_tsc, 0);

		rte_timer_init(&qconf->flush_timer);
		ret = rte_timer_reset(&qconf->flush_timer, drain_tsc, PERIODICAL,
				lcore_id, &l2fwd_flush_job, NULL);

		if (ret < 0) {
			rte_exit(1, "Failed to reset flush job timer for lcore %u: %s",
					lcore_id, rte_strerror(-ret));
		}
		/* >8 End of add flush job. */

		/* one forwarding job + timer per RX port owned by this lcore */
		for (i = 0; i < qconf->n_rx_port; i++) {
			struct rte_jobstats *job = &qconf->port_fwd_jobs[i];

			portid = qconf->rx_port_list[i];
			printf("Setting forward job for port %u\n", portid);

			snprintf(name, RTE_DIM(name), "port %u fwd", portid);
			/* Setup forward job. 8< */

			/* Set min, max and initial period. Set target to MAX_PKT_BURST as
			 * this is desired optimal RX/TX burst size.
			 */
			rte_jobstats_init(job, name, 0, drain_tsc, 0, MAX_PKT_BURST);
			rte_jobstats_set_update_period_function(job, l2fwd_job_update_cb);

			rte_timer_init(&qconf->rx_timers[i]);
			/* the callback receives the port's index i as its arg */
			ret = rte_timer_reset(&qconf->rx_timers[i], 0, PERIODICAL, lcore_id,
					&l2fwd_fwd_job, (void *)(uintptr_t)i);

			if (ret < 0) {
				rte_exit(1, "Failed to reset lcore %u port %u job timer: %s",
						lcore_id, qconf->rx_port_list[i], rte_strerror(-ret));
			}
			/* >8 End of forward job. */
		}
	}

	/* periodic stats display via the EAL alarm, unless disabled (-T 0) */
	if (timer_period)
		rte_eal_alarm_set(timer_period * MS_PER_S, show_stats_cb, NULL);
	else
		RTE_LOG(INFO, L2FWD, "Stats display disabled\n");

	/* launch per-lcore init on every lcore */
	rte_eal_mp_remote_launch(l2fwd_launch_one_lcore, NULL, CALL_MAIN);
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	/* clean up the EAL */
	rte_eal_cleanup();

	return 0;
}
1048