/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <string.h>
#include <sys/queue.h>
#include <stdarg.h>
#include <errno.h>
#include <getopt.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>
#include <rte_malloc.h>

#include "main.h"

44 void
app_main_loop_rx(void)45 app_main_loop_rx(void) {
46 uint32_t i;
47 int ret;
48
49 RTE_LOG(INFO, USER1, "Core %u is doing RX\n", rte_lcore_id());
50
51 while (!force_quit) {
52 for (i = 0; i < app.n_ports; i++) {
53 uint16_t n_mbufs;
54
55 n_mbufs = rte_eth_rx_burst(
56 app.ports[i],
57 0,
58 app.mbuf_rx.array,
59 app.burst_size_rx_read);
60
61 if (n_mbufs == 0)
62 continue;
63
64 do {
65 ret = rte_ring_sp_enqueue_bulk(
66 app.rings_rx[i],
67 (void **) app.mbuf_rx.array,
68 n_mbufs, NULL);
69 } while (ret == 0 && !force_quit);
70 }
71 }
72 }
73
74 void
app_main_loop_worker(void)75 app_main_loop_worker(void) {
76 struct app_mbuf_array *worker_mbuf;
77 uint32_t i;
78
79 RTE_LOG(INFO, USER1, "Core %u is doing work (no pipeline)\n",
80 rte_lcore_id());
81
82 worker_mbuf = rte_malloc_socket(NULL, sizeof(struct app_mbuf_array),
83 RTE_CACHE_LINE_SIZE, rte_socket_id());
84 if (worker_mbuf == NULL)
85 rte_panic("Worker thread: cannot allocate buffer space\n");
86
87 while (!force_quit) {
88 for (i = 0; i < app.n_ports; i++) {
89 int ret;
90
91 ret = rte_ring_sc_dequeue_bulk(
92 app.rings_rx[i],
93 (void **) worker_mbuf->array,
94 app.burst_size_worker_read,
95 NULL);
96
97 if (ret == 0)
98 continue;
99
100 do {
101 ret = rte_ring_sp_enqueue_bulk(
102 app.rings_tx[i ^ 1],
103 (void **) worker_mbuf->array,
104 app.burst_size_worker_write,
105 NULL);
106 } while (ret == 0 && !force_quit);
107 }
108 }
109 }
110
111 void
app_main_loop_tx(void)112 app_main_loop_tx(void) {
113 uint32_t i;
114
115 RTE_LOG(INFO, USER1, "Core %u is doing TX\n", rte_lcore_id());
116
117 while (!force_quit) {
118 for (i = 0; i < app.n_ports; i++) {
119 uint16_t n_mbufs, n_pkts;
120 int ret;
121
122 n_mbufs = app.mbuf_tx[i].n_mbufs;
123
124 ret = rte_ring_sc_dequeue_bulk(
125 app.rings_tx[i],
126 (void **) &app.mbuf_tx[i].array[n_mbufs],
127 app.burst_size_tx_read,
128 NULL);
129
130 if (ret == 0)
131 continue;
132
133 n_mbufs += app.burst_size_tx_read;
134
135 if (n_mbufs < app.burst_size_tx_write) {
136 app.mbuf_tx[i].n_mbufs = n_mbufs;
137 continue;
138 }
139
140 n_pkts = rte_eth_tx_burst(
141 app.ports[i],
142 0,
143 app.mbuf_tx[i].array,
144 n_mbufs);
145
146 if (n_pkts < n_mbufs) {
147 uint16_t k;
148
149 for (k = n_pkts; k < n_mbufs; k++) {
150 struct rte_mbuf *pkt_to_free;
151
152 pkt_to_free = app.mbuf_tx[i].array[k];
153 rte_pktmbuf_free(pkt_to_free);
154 }
155 }
156
157 app.mbuf_tx[i].n_mbufs = 0;
158 }
159 }
160 }
161