1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2017 Intel Corporation
3 */
4
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <string.h>
8 #include <unistd.h>
9 #include <stdint.h>
10 #include <stdarg.h>
11 #include <inttypes.h>
12 #include <sys/queue.h>
13 #include <errno.h>
14 #include <sys/types.h>
15 #include <netinet/in.h>
16 #include <netinet/ip.h>
17
18 #include <rte_common.h>
19 #include <rte_memory.h>
20 #include <rte_eal.h>
21 #include <rte_launch.h>
22 #include <rte_per_lcore.h>
23 #include <rte_lcore.h>
24 #include <rte_branch_prediction.h>
25 #include <rte_ring.h>
26 #include <rte_log.h>
27 #include <rte_debug.h>
28 #include <rte_mempool.h>
29 #include <rte_memcpy.h>
30 #include <rte_mbuf.h>
31 #include <rte_ether.h>
32 #include <rte_interrupts.h>
33 #include <rte_ethdev.h>
34 #include <rte_byteorder.h>
35 #include <rte_malloc.h>
36 #include <rte_string_fns.h>
37 #include <rte_efd.h>
38 #include <rte_ip.h>
39
40 #include "common.h"
41 #include "args.h"
42 #include "init.h"
43
44 /*
45 * When doing reads from the NIC or the node queues,
46 * use this batch size
47 */
48 #define PACKET_READ_SIZE 32
49
/*
 * Local buffers to put packets in, used to send packets in bursts to the
 * nodes
 */
struct node_rx_buf {
	struct rte_mbuf *buffer[PACKET_READ_SIZE]; /* packets staged for one node */
	uint16_t count;                            /* valid entries in buffer[] */
};

/* Counters for the EFD-based flow distribution done in process_packets(). */
struct efd_stats {
	uint64_t distributed; /* packets whose EFD lookup mapped to a valid node */
	uint64_t drop;        /* packets whose lookup returned node >= num_nodes */
} flow_dist_stats;

/* One buffer per node rx queue - dynamically allocate array */
static struct node_rx_buf *cl_rx_buf;
66
67 static const char *
get_printable_mac_addr(uint16_t port)68 get_printable_mac_addr(uint16_t port)
69 {
70 static const char err_address[] = "00:00:00:00:00:00";
71 static char addresses[RTE_MAX_ETHPORTS][sizeof(err_address)];
72 struct rte_ether_addr mac;
73 int ret;
74
75 if (unlikely(port >= RTE_MAX_ETHPORTS))
76 return err_address;
77 if (unlikely(addresses[port][0] == '\0')) {
78 ret = rte_eth_macaddr_get(port, &mac);
79 if (ret != 0) {
80 printf("Failed to get MAC address (port %u): %s\n",
81 port, rte_strerror(-ret));
82 return err_address;
83 }
84
85 snprintf(addresses[port], sizeof(addresses[port]),
86 RTE_ETHER_ADDR_PRT_FMT "\n",
87 RTE_ETHER_ADDR_BYTES(&mac));
88 }
89 return addresses[port];
90 }
91
92 /*
93 * This function displays the recorded statistics for each port
94 * and for each node. It uses ANSI terminal codes to clear
95 * screen when called. It is called from a single worker
96 * thread in the server process, when the process is run with more
97 * than one lcore enabled.
98 */
99
100 /* Display recorded statistics. 8< */
101 static void
do_stats_display(void)102 do_stats_display(void)
103 {
104 unsigned int i, j;
105 const char clr[] = {27, '[', '2', 'J', '\0'};
106 const char topLeft[] = {27, '[', '1', ';', '1', 'H', '\0'};
107 uint64_t port_tx[RTE_MAX_ETHPORTS], port_tx_drop[RTE_MAX_ETHPORTS];
108 uint64_t node_tx[MAX_NODES], node_tx_drop[MAX_NODES];
109
110 /* to get TX stats, we need to do some summing calculations */
111 memset(port_tx, 0, sizeof(port_tx));
112 memset(port_tx_drop, 0, sizeof(port_tx_drop));
113 memset(node_tx, 0, sizeof(node_tx));
114 memset(node_tx_drop, 0, sizeof(node_tx_drop));
115
116 for (i = 0; i < num_nodes; i++) {
117 const struct tx_stats *tx = &info->tx_stats[i];
118
119 for (j = 0; j < info->num_ports; j++) {
120 const uint64_t tx_val = tx->tx[info->id[j]];
121 const uint64_t drop_val = tx->tx_drop[info->id[j]];
122
123 port_tx[j] += tx_val;
124 port_tx_drop[j] += drop_val;
125 node_tx[i] += tx_val;
126 node_tx_drop[i] += drop_val;
127 }
128 }
129
130 /* Clear screen and move to top left */
131 printf("%s%s", clr, topLeft);
132
133 printf("PORTS\n");
134 printf("-----\n");
135 for (i = 0; i < info->num_ports; i++)
136 printf("Port %u: '%s'\t", (unsigned int)info->id[i],
137 get_printable_mac_addr(info->id[i]));
138 printf("\n\n");
139 for (i = 0; i < info->num_ports; i++) {
140 printf("Port %u - rx: %9"PRIu64"\t"
141 "tx: %9"PRIu64"\n",
142 (unsigned int)info->id[i], info->rx_stats.rx[i],
143 port_tx[i]);
144 }
145
146 printf("\nSERVER\n");
147 printf("-----\n");
148 printf("distributed: %9"PRIu64", drop: %9"PRIu64"\n",
149 flow_dist_stats.distributed, flow_dist_stats.drop);
150
151 printf("\nNODES\n");
152 printf("-------\n");
153 for (i = 0; i < num_nodes; i++) {
154 const unsigned long long rx = nodes[i].stats.rx;
155 const unsigned long long rx_drop = nodes[i].stats.rx_drop;
156 const struct filter_stats *filter = &info->filter_stats[i];
157
158 printf("Node %2u - rx: %9llu, rx_drop: %9llu\n"
159 " tx: %9"PRIu64", tx_drop: %9"PRIu64"\n"
160 " filter_passed: %9"PRIu64", "
161 "filter_drop: %9"PRIu64"\n",
162 i, rx, rx_drop, node_tx[i], node_tx_drop[i],
163 filter->passed, filter->drop);
164 }
165
166 printf("\n");
167 }
168 /* >8 End of displaying the recorded statistics. */
169
170 /*
171 * The function called from each non-main lcore used by the process.
172 * The test_and_set function is used to randomly pick a single lcore on which
173 * the code to display the statistics will run. Otherwise, the code just
174 * repeatedly sleeps.
175 */
176 static int
sleep_lcore(__rte_unused void * dummy)177 sleep_lcore(__rte_unused void *dummy)
178 {
179 /* Used to pick a display thread - static, so zero-initialised */
180 static RTE_ATOMIC(uint32_t) display_stats;
181
182 /* Only one core should display stats */
183 uint32_t display_init = 0;
184 if (rte_atomic_compare_exchange_strong_explicit(&display_stats, &display_init, 1,
185 rte_memory_order_relaxed, rte_memory_order_relaxed)) {
186 const unsigned int sleeptime = 1;
187
188 printf("Core %u displaying statistics\n", rte_lcore_id());
189
190 /* Longer initial pause so above printf is seen */
191 sleep(sleeptime * 3);
192
193 /* Loop forever: sleep always returns 0 or <= param */
194 while (sleep(sleeptime) <= sleeptime)
195 do_stats_display();
196 }
197 return 0;
198 }
199
200 /*
201 * Function to set all the node statistic values to zero.
202 * Called at program startup.
203 */
204 static void
clear_stats(void)205 clear_stats(void)
206 {
207 unsigned int i;
208
209 for (i = 0; i < num_nodes; i++)
210 nodes[i].stats.rx = nodes[i].stats.rx_drop = 0;
211 }
212
213 /*
214 * send a burst of traffic to a node, assuming there are packets
215 * available to be sent to this node
216 */
217
218 /* Flush rx queue. 8< */
219 static void
flush_rx_queue(uint16_t node)220 flush_rx_queue(uint16_t node)
221 {
222 uint16_t j;
223 struct node *cl;
224
225 if (cl_rx_buf[node].count == 0)
226 return;
227
228 cl = &nodes[node];
229 if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[node].buffer,
230 cl_rx_buf[node].count, NULL) != cl_rx_buf[node].count){
231 for (j = 0; j < cl_rx_buf[node].count; j++)
232 rte_pktmbuf_free(cl_rx_buf[node].buffer[j]);
233 cl->stats.rx_drop += cl_rx_buf[node].count;
234 } else
235 cl->stats.rx += cl_rx_buf[node].count;
236
237 cl_rx_buf[node].count = 0;
238 }
239 /* >8 End of sending a burst of traffic to a node. */
240
241 /*
242 * marks a packet down to be sent to a particular node process
243 */
244 static inline void
enqueue_rx_packet(uint8_t node,struct rte_mbuf * buf)245 enqueue_rx_packet(uint8_t node, struct rte_mbuf *buf)
246 {
247 cl_rx_buf[node].buffer[cl_rx_buf[node].count++] = buf;
248 }
249
250 /*
251 * This function takes a group of packets and routes them
252 * individually to the node process. Very simply round-robins the packets
253 * without checking any of the packet contents. 8<
254 */
255
256 /* Processing packets. 8< */
257 static void
process_packets(uint32_t port_num __rte_unused,struct rte_mbuf * pkts[],uint16_t rx_count,unsigned int socket_id)258 process_packets(uint32_t port_num __rte_unused, struct rte_mbuf *pkts[],
259 uint16_t rx_count, unsigned int socket_id)
260 {
261 uint16_t i;
262 uint8_t node;
263 efd_value_t data[RTE_EFD_BURST_MAX];
264 const void *key_ptrs[RTE_EFD_BURST_MAX];
265
266 struct rte_ipv4_hdr *ipv4_hdr;
267 uint32_t ipv4_dst_ip[RTE_EFD_BURST_MAX];
268
269 for (i = 0; i < rx_count; i++) {
270 /* Handle IPv4 header.*/
271 ipv4_hdr = rte_pktmbuf_mtod_offset(pkts[i],
272 struct rte_ipv4_hdr *, sizeof(struct rte_ether_hdr));
273 ipv4_dst_ip[i] = ipv4_hdr->dst_addr;
274 key_ptrs[i] = (void *)&ipv4_dst_ip[i];
275 }
276
277 rte_efd_lookup_bulk(efd_table, socket_id, rx_count,
278 (const void **) key_ptrs, data);
279 for (i = 0; i < rx_count; i++) {
280 node = (uint8_t) ((uintptr_t)data[i]);
281
282 if (node >= num_nodes) {
283 /*
284 * Node is out of range, which means that
285 * flow has not been inserted
286 */
287 flow_dist_stats.drop++;
288 rte_pktmbuf_free(pkts[i]);
289 } else {
290 flow_dist_stats.distributed++;
291 enqueue_rx_packet(node, pkts[i]);
292 }
293 }
294
295 for (i = 0; i < num_nodes; i++)
296 flush_rx_queue(i);
297 }
298 /* >8 End of process_packets. */
299
300 /*
301 * Function called by the main lcore of the DPDK process.
302 */
303 static void
do_packet_forwarding(void)304 do_packet_forwarding(void)
305 {
306 unsigned int port_num = 0; /* indexes the port[] array */
307 unsigned int socket_id = rte_socket_id();
308
309 for (;;) {
310 struct rte_mbuf *buf[PACKET_READ_SIZE];
311 uint16_t rx_count;
312
313 /* read a port */
314 rx_count = rte_eth_rx_burst(info->id[port_num], 0,
315 buf, PACKET_READ_SIZE);
316 info->rx_stats.rx[port_num] += rx_count;
317
318 /* Now process the NIC packets read */
319 if (likely(rx_count > 0))
320 process_packets(port_num, buf, rx_count, socket_id);
321
322 /* move to next port */
323 if (++port_num == info->num_ports)
324 port_num = 0;
325 }
326 }
327
328 int
main(int argc,char * argv[])329 main(int argc, char *argv[])
330 {
331 /* initialise the system */
332 if (init(argc, argv) < 0)
333 return -1;
334 RTE_LOG(INFO, APP, "Finished Process Init.\n");
335
336 cl_rx_buf = calloc(num_nodes, sizeof(cl_rx_buf[0]));
337
338 /* clear statistics */
339 clear_stats();
340
341 /* put all other cores to sleep except main */
342 rte_eal_mp_remote_launch(sleep_lcore, NULL, SKIP_MAIN);
343
344 do_packet_forwarding();
345
346 /* clean up the EAL */
347 rte_eal_cleanup();
348
349 return 0;
350 }
351