/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>
#include <stdarg.h>
#include <errno.h>
#include <sys/queue.h>
#include <stdlib.h>
#include <getopt.h>
#include <string.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_branch_prediction.h>
#include <rte_log.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_ring.h>
#include <rte_debug.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_ip.h>
#include <rte_byteorder.h>
#include <rte_hash.h>

#include "common.h"

/* Number of packets to attempt to read from queue */
#define PKT_READ_SIZE ((uint16_t)32)
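/*
 * This burst size also dictates the size of the dequeue array in main()
 * and the per-burst lookup arrays in handle_packets() below.
 */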

/*
 * Our node id number - tells us which RX queue to read from and which
 * NIC TX queue to write to.
 */
static uint8_t node_id;

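/* Capacity (in packets) of each port's software TX buffer */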
#define MBQ_CAPACITY 32

/* maps input ports to output ports for packets */
static uint16_t output_ports[RTE_MAX_ETHPORTS];

/* buffers up a set of packets that are ready to send */
struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];

/* shared data from server. We update statistics here */
static struct tx_stats *tx_stats;

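/* shared filter statistics; counts packets this node passed or dropped */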
static struct filter_stats *filter_stats;

/*
 * print a usage message
 */
static void
usage(const char *progname)
{
	printf("Usage: %s [EAL args] -- -n <node_id>\n\n", progname);
}

/*
 * Convert the node id number from a string to an int.
 */
static int
parse_node_num(const char *node)
{
	char *end = NULL;
	unsigned long temp;

	if (node == NULL || *node == '\0')
		return -1;

	temp = strtoul(node, &end, 10);
	if (end == NULL || *end != '\0' || temp > UINT8_MAX)
		return -1;

	node_id = (uint8_t)temp;
	return 0;
}

/*
 * Parse the application arguments to the node app.
 */
static int
parse_app_args(int argc, char *argv[])
{
	int option_index, opt;
	char **argvopt = argv;
	const char *progname = NULL;
	static struct option lgopts[] = { /* no long options */
		{NULL, 0, 0, 0 }
	};
	progname = argv[0];

	while ((opt = getopt_long(argc, argvopt, "n:", lgopts,
			&option_index)) != EOF) {
		switch (opt) {
		case 'n':
			if (parse_node_num(optarg) != 0) {
				usage(progname);
				return -1;
			}
			break;
		default:
			usage(progname);
			return -1;
		}
	}
	return 0;
}

/*
 * Tx buffer error callback. Invoked by the ethdev TX buffering API for
 * packets that could not be sent; account them as drops and free them.
 */
static void
flush_tx_error_callback(struct rte_mbuf **unsent, uint16_t count,
		void *userdata)
{
	int i;
	/* the port id is passed through the userdata pointer */
	uint16_t port_id = (uintptr_t)userdata;

	tx_stats->tx_drop[port_id] += count;

	/* free the mbufs which failed from transmit */
	for (i = 0; i < count; i++)
		rte_pktmbuf_free(unsent[i]);
}

static void
configure_tx_buffer(uint16_t port_id, uint16_t size)
{
	int ret;

	/* Initialize TX buffers */
	tx_buffer[port_id] = rte_zmalloc_socket("tx_buffer",
			RTE_ETH_TX_BUFFER_SIZE(size), 0,
			rte_eth_dev_socket_id(port_id));
	if (tx_buffer[port_id] == NULL)
		rte_exit(EXIT_FAILURE,
			"Cannot allocate buffer for tx on port %u\n", port_id);

	rte_eth_tx_buffer_init(tx_buffer[port_id], size);

	ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[port_id],
			flush_tx_error_callback, (void *)(intptr_t)port_id);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			"Cannot set error callback for tx buffer on port %u\n",
			port_id);
}

/*
 * set up output ports so that all traffic on a port gets sent out
 * its paired port. Index using actual port numbers since that is
 * what comes in the mbuf structure.
 */
static void
configure_output_ports(const struct shared_info *info)
{
	int i;

	if (info->num_ports > RTE_MAX_ETHPORTS)
		rte_exit(EXIT_FAILURE, "Too many ethernet ports. "
				"RTE_MAX_ETHPORTS = %u\n",
				(unsigned int)RTE_MAX_ETHPORTS);
	for (i = 0; i < info->num_ports - 1; i += 2) {
		uint16_t p1 = info->id[i];
		uint16_t p2 = info->id[i + 1];

		output_ports[p1] = p2;
		output_ports[p2] = p1;

		configure_tx_buffer(p1, MBQ_CAPACITY);
		configure_tx_buffer(p2, MBQ_CAPACITY);
	}
}

/*
 * Create the hash table that will contain the flows that
 * the node will handle, which will be used to decide if a packet
 * is transmitted or dropped.
 */

/* Creation of hash table. 8< */
static struct rte_hash *
create_hash_table(const struct shared_info *info)
{
	uint32_t num_flows_node = info->num_flows / info->num_nodes;
	char name[RTE_HASH_NAMESIZE];
	struct rte_hash *h;

	/* create table */
	struct rte_hash_parameters hash_params = {
		.entries = num_flows_node * 2, /* table load = 50% */
		.key_len = sizeof(uint32_t), /* Store IPv4 dest IP address */
		.socket_id = rte_socket_id(),
		.hash_func_init_val = 0,
	};
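	/*
	 * Note: .hash_func is left unset, so rte_hash_create() falls back to
	 * its default hash function.
	 */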

	snprintf(name, sizeof(name), "hash_table_%d", node_id);
	hash_params.name = name;
	h = rte_hash_create(&hash_params);

	if (h == NULL)
		rte_exit(EXIT_FAILURE,
				"Problem creating the hash table for node %d\n",
				node_id);
	return h;
}

static void
populate_hash_table(const struct rte_hash *h, const struct shared_info *info)
{
	unsigned int i;
	int32_t ret;
	uint32_t ip_dst;
	uint32_t num_flows_node = 0;
	uint64_t target_node;

	/* Add flows in table */
	for (i = 0; i < info->num_flows; i++) {
		target_node = i % info->num_nodes;
		if (target_node != node_id)
			continue;

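		/*
		 * Keys are stored in network byte order so they can be
		 * compared directly against the dst_addr field read from the
		 * packet header in handle_packets().
		 */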
		ip_dst = rte_cpu_to_be_32(i);

		ret = rte_hash_add_key(h, (void *) &ip_dst);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Unable to add entry %u "
					"in hash table\n", i);
		else
			num_flows_node++;
	}

	printf("Hash table: Adding 0x%x keys\n", num_flows_node);
}
/* >8 End of creation of hash table. */

/*
 * Send a packet out the output port paired with the port it
 * arrived on.
 */
static inline void
transmit_packet(struct rte_mbuf *buf)
{
	int sent;
	const uint16_t in_port = buf->port;
	const uint16_t out_port = output_ports[in_port];
	struct rte_eth_dev_tx_buffer *buffer = tx_buffer[out_port];

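	/*
	 * rte_eth_tx_buffer() only queues the mbuf; it returns a non-zero
	 * count only when the buffer fills up and a burst is actually
	 * transmitted.
	 */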
	sent = rte_eth_tx_buffer(out_port, node_id, buffer, buf);
	if (sent)
		tx_stats->tx[out_port] += sent;
}

/* Packets dequeued from the shared ring. 8< */
static inline void
handle_packets(struct rte_hash *h, struct rte_mbuf **bufs, uint16_t num_packets)
{
	struct rte_ipv4_hdr *ipv4_hdr;
	uint32_t ipv4_dst_ip[PKT_READ_SIZE];
	const void *key_ptrs[PKT_READ_SIZE];
	unsigned int i;
	int32_t positions[PKT_READ_SIZE] = {0};

	for (i = 0; i < num_packets; i++) {
		/* Handle IPv4 header.*/
		ipv4_hdr = rte_pktmbuf_mtod_offset(bufs[i],
				struct rte_ipv4_hdr *,
				sizeof(struct rte_ether_hdr));
		ipv4_dst_ip[i] = ipv4_hdr->dst_addr;
		key_ptrs[i] = &ipv4_dst_ip[i];
	}
	/* Check if packets belong to any flows handled by this node */
	rte_hash_lookup_bulk(h, key_ptrs, num_packets, positions);

	for (i = 0; i < num_packets; i++) {
		if (likely(positions[i] >= 0)) {
			filter_stats->passed++;
			transmit_packet(bufs[i]);
		} else {
			filter_stats->drop++;
			/* Drop packet, as flow is not handled by this node */
			rte_pktmbuf_free(bufs[i]);
		}
	}
}
/* >8 End of packets dequeuing. */

/*
 * Application main function - loops through
 * receiving and processing packets. Never returns
 */
int
main(int argc, char *argv[])
{
	const struct rte_memzone *mz;
	struct rte_ring *rx_ring;
	struct rte_hash *h;
	struct rte_mempool *mp;
	struct shared_info *info;
	int need_flush = 0; /* indicates whether we have unsent packets */
	int retval;
	void *pkts[PKT_READ_SIZE];
	uint16_t sent;

	retval = rte_eal_init(argc, argv);
	if (retval < 0)
		return -1;
	argc -= retval;
	argv += retval;

	if (parse_app_args(argc, argv) < 0)
		rte_exit(EXIT_FAILURE, "Invalid command-line arguments\n");

	if (rte_eth_dev_count_avail() == 0)
		rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");

	/* Attaching to the server process memory. 8< */
	rx_ring = rte_ring_lookup(get_rx_queue_name(node_id));
	if (rx_ring == NULL)
		rte_exit(EXIT_FAILURE, "Cannot get RX ring - "
				"is server process running?\n");

	mp = rte_mempool_lookup(PKTMBUF_POOL_NAME);
	if (mp == NULL)
		rte_exit(EXIT_FAILURE, "Cannot get mempool for mbufs\n");

	mz = rte_memzone_lookup(MZ_SHARED_INFO);
	if (mz == NULL)
		rte_exit(EXIT_FAILURE, "Cannot get port info structure\n");
	info = mz->addr;
	tx_stats = &(info->tx_stats[node_id]);
	filter_stats = &(info->filter_stats[node_id]);
	/* >8 End of attaching to the server process memory. */

	configure_output_ports(info);

	h = create_hash_table(info);

	populate_hash_table(h, info);

	RTE_LOG(INFO, APP, "Finished Process Init.\n");

	printf("\nNode process %d handling packets\n", node_id);
	printf("[Press Ctrl-C to quit ...]\n");

	for (;;) {
		uint16_t rx_pkts = PKT_READ_SIZE;
		uint16_t port;

		/*
		 * Try dequeuing the maximum possible number of packets first;
		 * if that fails, dequeue as many as are currently in the ring
		 * (bulk dequeue is all-or-nothing). The loop body should
		 * execute at most once.
		 */
		while (rx_pkts > 0 &&
				unlikely(rte_ring_dequeue_bulk(rx_ring, pkts,
					rx_pkts, NULL) == 0))
			rx_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring),
					PKT_READ_SIZE);

		if (unlikely(rx_pkts == 0)) {
			if (need_flush)
				for (port = 0; port < info->num_ports; port++) {
					/* flush and account by actual port id */
					sent = rte_eth_tx_buffer_flush(
							info->id[port],
							node_id,
							tx_buffer[info->id[port]]);
					if (unlikely(sent))
						tx_stats->tx[info->id[port]] += sent;
				}
			need_flush = 0;
			continue;
		}

		handle_packets(h, (struct rte_mbuf **)pkts, rx_pkts);

		need_flush = 1;
	}

	/* clean up the EAL */
	rte_eal_cleanup();
}