1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2015 Intel Corporation
3 */
4
5 #include <stdint.h>
6 #include <sys/queue.h>
7 #include <sys/socket.h>
8 #include <stdlib.h>
9 #include <string.h>
10 #include <stdio.h>
11 #include <assert.h>
12 #include <errno.h>
13 #include <signal.h>
14 #include <stdarg.h>
15 #include <inttypes.h>
16 #include <getopt.h>
17 #include <termios.h>
18 #include <unistd.h>
19 #include <pthread.h>
20
21 #include <rte_common.h>
22 #include <rte_log.h>
23 #include <rte_memory.h>
24 #include <rte_memcpy.h>
25 #include <rte_eal.h>
26 #include <rte_launch.h>
27 #include <rte_cycles.h>
28 #include <rte_prefetch.h>
29 #include <rte_lcore.h>
30 #include <rte_per_lcore.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_interrupts.h>
33 #include <rte_random.h>
34 #include <rte_debug.h>
35 #include <rte_ether.h>
36 #include <rte_ethdev.h>
37 #include <rte_mempool.h>
38 #include <rte_mbuf.h>
39 #include <rte_ip.h>
40 #include <rte_tcp.h>
41 #include <rte_arp.h>
42 #include <rte_spinlock.h>
43 #include <rte_devargs.h>
44 #include <rte_byteorder.h>
45 #include <rte_cpuflags.h>
46 #include <rte_eth_bond.h>
47
48 #include <cmdline_socket.h>
49 #include "commands.h"
50
#define RTE_LOGTYPE_DCB RTE_LOGTYPE_USER1

#define NB_MBUF (1024*8)

#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
#define BURST_RX_INTERVAL_NS (10) /* RX poll interval ~10ns */

/*
 * RX and TX Prefetch, Host, and Write-back threshold values should be
 * carefully set for optimal performance. Consult the network
 * controller's datasheet and supporting DPDK documentation for guidance
 * on how these parameters should be set.
 */
#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
#define RX_FTHRESH (MAX_PKT_BURST * 2)/**< Default values of RX free threshold reg. */

/*
 * These default values are optimized for use with the Intel(R) 82599 10 GbE
 * Controller and the DPDK ixgbe PMD. Consider using other values for other
 * network controllers and/or network drivers.
 */
#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */
#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */

/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_RX_DESC_DEFAULT 1024
#define RTE_TX_DESC_DEFAULT 1024

/* The bond's IPv4 address, one byte per macro: 7.0.0.10 */
#define BOND_IP_1 7
#define BOND_IP_2 0
#define BOND_IP_3 0
#define BOND_IP_4 10

/* not defined under linux; guard the macro that is actually defined here */
#ifndef NIPQUAD_FMT
#define NIPQUAD_FMT "%u.%u.%u.%u"
#endif

#define MAX_PORTS 4
#define PRINT_MAC(addr) printf("%02"PRIx8":%02"PRIx8":%02"PRIx8 \
	":%02"PRIx8":%02"PRIx8":%02"PRIx8, \
	RTE_ETHER_ADDR_BYTES(&addr))
99
/* Port ids of the member (physical) ports; filled in by main(). */
uint16_t members[RTE_MAX_ETHPORTS];
uint16_t members_count;

/* Port id of the bonding device; 0xffff until bond_port_init() assigns it. */
static uint16_t BOND_PORT = 0xffff;

/* Single mbuf pool shared by all RX queues and by cmd_send_parsed(). */
static struct rte_mempool *mbuf_pool;

/* Base port configuration; copied and specialized by both
 * member_port_init() and bond_port_init(). */
static struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode = RTE_ETH_MQ_RX_NONE,
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL, /* use the PMD's default RSS key */
			.rss_hf = RTE_ETH_RSS_IP,
		},
	},
	.txmode = {
		.mq_mode = RTE_ETH_MQ_TX_NONE,
	},
};
121
122 static void
member_port_init(uint16_t portid,struct rte_mempool * mbuf_pool)123 member_port_init(uint16_t portid, struct rte_mempool *mbuf_pool)
124 {
125 int retval;
126 uint16_t nb_rxd = RTE_RX_DESC_DEFAULT;
127 uint16_t nb_txd = RTE_TX_DESC_DEFAULT;
128 struct rte_eth_dev_info dev_info;
129 struct rte_eth_rxconf rxq_conf;
130 struct rte_eth_txconf txq_conf;
131 struct rte_eth_conf local_port_conf = port_conf;
132
133 if (!rte_eth_dev_is_valid_port(portid))
134 rte_exit(EXIT_FAILURE, "Invalid port\n");
135
136 retval = rte_eth_dev_info_get(portid, &dev_info);
137 if (retval != 0)
138 rte_exit(EXIT_FAILURE,
139 "Error during getting device (port %u) info: %s\n",
140 portid, strerror(-retval));
141
142 if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
143 local_port_conf.txmode.offloads |=
144 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
145
146 local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
147 dev_info.flow_type_rss_offloads;
148 if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
149 port_conf.rx_adv_conf.rss_conf.rss_hf) {
150 printf("Port %u modified RSS hash function based on hardware support,"
151 "requested:%#"PRIx64" configured:%#"PRIx64"\n",
152 portid,
153 port_conf.rx_adv_conf.rss_conf.rss_hf,
154 local_port_conf.rx_adv_conf.rss_conf.rss_hf);
155 }
156
157 retval = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
158 if (retval != 0)
159 rte_exit(EXIT_FAILURE, "port %u: configuration failed (res=%d)\n",
160 portid, retval);
161
162 retval = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, &nb_txd);
163 if (retval != 0)
164 rte_exit(EXIT_FAILURE, "port %u: rte_eth_dev_adjust_nb_rx_tx_desc "
165 "failed (res=%d)\n", portid, retval);
166
167 /* RX setup */
168 rxq_conf = dev_info.default_rxconf;
169 rxq_conf.offloads = local_port_conf.rxmode.offloads;
170 retval = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
171 rte_eth_dev_socket_id(portid),
172 &rxq_conf,
173 mbuf_pool);
174 if (retval < 0)
175 rte_exit(retval, " port %u: RX queue 0 setup failed (res=%d)",
176 portid, retval);
177
178 /* TX setup */
179 txq_conf = dev_info.default_txconf;
180 txq_conf.offloads = local_port_conf.txmode.offloads;
181 retval = rte_eth_tx_queue_setup(portid, 0, nb_txd,
182 rte_eth_dev_socket_id(portid), &txq_conf);
183
184 if (retval < 0)
185 rte_exit(retval, "port %u: TX queue 0 setup failed (res=%d)",
186 portid, retval);
187
188 retval = rte_eth_dev_start(portid);
189 if (retval < 0)
190 rte_exit(retval,
191 "Start port %d failed (res=%d)",
192 portid, retval);
193
194 struct rte_ether_addr addr;
195
196 retval = rte_eth_macaddr_get(portid, &addr);
197 if (retval != 0)
198 rte_exit(retval,
199 "Mac address get port %d failed (res=%d)",
200 portid, retval);
201
202 printf("Port %u MAC: ", portid);
203 PRINT_MAC(addr);
204 printf("\n");
205 }
206
207 static void
bond_port_init(struct rte_mempool * mbuf_pool)208 bond_port_init(struct rte_mempool *mbuf_pool)
209 {
210 int retval;
211 uint8_t i;
212 uint16_t nb_rxd = RTE_RX_DESC_DEFAULT;
213 uint16_t nb_txd = RTE_TX_DESC_DEFAULT;
214 struct rte_eth_dev_info dev_info;
215 struct rte_eth_rxconf rxq_conf;
216 struct rte_eth_txconf txq_conf;
217 struct rte_eth_conf local_port_conf = port_conf;
218 uint16_t wait_counter = 20;
219
220 retval = rte_eth_bond_create("net_bonding0", BONDING_MODE_ALB,
221 0 /*SOCKET_ID_ANY*/);
222 if (retval < 0)
223 rte_exit(EXIT_FAILURE,
224 "Failed to create bond port\n");
225
226 BOND_PORT = retval;
227
228 retval = rte_eth_dev_info_get(BOND_PORT, &dev_info);
229 if (retval != 0)
230 rte_exit(EXIT_FAILURE,
231 "Error during getting device (port %u) info: %s\n",
232 BOND_PORT, strerror(-retval));
233
234 if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
235 local_port_conf.txmode.offloads |=
236 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
237 retval = rte_eth_dev_configure(BOND_PORT, 1, 1, &local_port_conf);
238 if (retval != 0)
239 rte_exit(EXIT_FAILURE, "port %u: configuration failed (res=%d)\n",
240 BOND_PORT, retval);
241
242 retval = rte_eth_dev_adjust_nb_rx_tx_desc(BOND_PORT, &nb_rxd, &nb_txd);
243 if (retval != 0)
244 rte_exit(EXIT_FAILURE, "port %u: rte_eth_dev_adjust_nb_rx_tx_desc "
245 "failed (res=%d)\n", BOND_PORT, retval);
246
247 for (i = 0; i < members_count; i++) {
248 if (rte_eth_bond_member_add(BOND_PORT, members[i]) == -1)
249 rte_exit(-1, "Oooops! adding member (%u) to bond (%u) failed!\n",
250 members[i], BOND_PORT);
251
252 }
253
254 /* RX setup */
255 rxq_conf = dev_info.default_rxconf;
256 rxq_conf.offloads = local_port_conf.rxmode.offloads;
257 retval = rte_eth_rx_queue_setup(BOND_PORT, 0, nb_rxd,
258 rte_eth_dev_socket_id(BOND_PORT),
259 &rxq_conf, mbuf_pool);
260 if (retval < 0)
261 rte_exit(retval, " port %u: RX queue 0 setup failed (res=%d)",
262 BOND_PORT, retval);
263
264 /* TX setup */
265 txq_conf = dev_info.default_txconf;
266 txq_conf.offloads = local_port_conf.txmode.offloads;
267 retval = rte_eth_tx_queue_setup(BOND_PORT, 0, nb_txd,
268 rte_eth_dev_socket_id(BOND_PORT), &txq_conf);
269
270 if (retval < 0)
271 rte_exit(retval, "port %u: TX queue 0 setup failed (res=%d)",
272 BOND_PORT, retval);
273
274 retval = rte_eth_dev_start(BOND_PORT);
275 if (retval < 0)
276 rte_exit(retval, "Start port %d failed (res=%d)", BOND_PORT, retval);
277
278 printf("Waiting for members to become active...");
279 while (wait_counter) {
280 uint16_t act_members[16] = {0};
281 if (rte_eth_bond_active_members_get(BOND_PORT, act_members, 16) ==
282 members_count) {
283 printf("\n");
284 break;
285 }
286 sleep(1);
287 printf("...");
288 if (--wait_counter == 0)
289 rte_exit(-1, "\nFailed to activate members\n");
290 }
291
292 retval = rte_eth_promiscuous_enable(BOND_PORT);
293 if (retval != 0) {
294 rte_exit(EXIT_FAILURE,
295 "port %u: promiscuous mode enable failed: %s\n",
296 BOND_PORT, rte_strerror(-retval));
297 return;
298 }
299
300 struct rte_ether_addr addr;
301
302 retval = rte_eth_macaddr_get(BOND_PORT, &addr);
303 if (retval != 0)
304 rte_exit(retval, "port %u: Mac address get failed (res=%d)",
305 BOND_PORT, retval);
306
307 printf("Port %u MAC: ", (unsigned)BOND_PORT);
308 PRINT_MAC(addr);
309 printf("\n");
310 }
311
312 static inline size_t
get_vlan_offset(struct rte_ether_hdr * eth_hdr,uint16_t * proto)313 get_vlan_offset(struct rte_ether_hdr *eth_hdr, uint16_t *proto)
314 {
315 size_t vlan_offset = 0;
316
317 if (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto) {
318 struct rte_vlan_hdr *vlan_hdr =
319 (struct rte_vlan_hdr *)(eth_hdr + 1);
320
321 vlan_offset = sizeof(struct rte_vlan_hdr);
322 *proto = vlan_hdr->eth_proto;
323
324 if (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto) {
325 vlan_hdr = vlan_hdr + 1;
326
327 *proto = vlan_hdr->eth_proto;
328 vlan_offset += sizeof(struct rte_vlan_hdr);
329 }
330 }
331 return vlan_offset;
332 }
333
/* State shared between the cmdline thread and the ARP-processing lcore. */
struct global_flag_stru_t {
	int LcoreMainIsRunning;   /* cleared by stop/quit to ask lcore_main to exit */
	int LcoreMainCore;        /* lcore id that lcore_main was launched on */
	uint32_t port_packets[4]; /* [0] total, [1] ARP, [2] IPv4 RX counters */
	rte_spinlock_t lock;      /* guards the fields above */
};
struct global_flag_stru_t global_flag_stru;
struct global_flag_stru_t *global_flag_stru_p = &global_flag_stru;
342
343 /*
344 * Main thread that does the work, reading from INPUT_PORT
345 * and writing to OUTPUT_PORT
346 */
lcore_main(__rte_unused void * arg1)347 static int lcore_main(__rte_unused void *arg1)
348 {
349 alignas(RTE_CACHE_LINE_SIZE) struct rte_mbuf *pkts[MAX_PKT_BURST];
350 struct rte_ether_addr dst_addr;
351
352 struct rte_ether_addr bond_mac_addr;
353 struct rte_ether_hdr *eth_hdr;
354 struct rte_arp_hdr *arp_hdr;
355 struct rte_ipv4_hdr *ipv4_hdr;
356 uint16_t ether_type, offset;
357
358 uint16_t rx_cnt;
359 uint32_t bond_ip;
360 int i = 0;
361 uint8_t is_free;
362 int ret;
363
364 bond_ip = BOND_IP_1 | (BOND_IP_2 << 8) |
365 (BOND_IP_3 << 16) | (BOND_IP_4 << 24);
366
367 rte_spinlock_lock(&global_flag_stru_p->lock);
368
369 while (global_flag_stru_p->LcoreMainIsRunning) {
370 rte_spinlock_unlock(&global_flag_stru_p->lock);
371 rx_cnt = rte_eth_rx_burst(BOND_PORT, 0, pkts, MAX_PKT_BURST);
372 is_free = 0;
373
374 /* If didn't receive any packets, wait and go to next iteration */
375 if (rx_cnt == 0) {
376 rte_delay_us(50);
377 continue;
378 }
379
380 ret = rte_eth_macaddr_get(BOND_PORT, &bond_mac_addr);
381 if (ret != 0) {
382 printf("Bond (port %u) MAC address get failed: %s.\n"
383 "%u packets dropped", BOND_PORT, strerror(-ret),
384 rx_cnt);
385 rte_pktmbuf_free(pkts[i]);
386 continue;
387 }
388
389 /* Search incoming data for ARP packets and prepare response */
390 for (i = 0; i < rx_cnt; i++) {
391 if (rte_spinlock_trylock(&global_flag_stru_p->lock) == 1) {
392 global_flag_stru_p->port_packets[0]++;
393 rte_spinlock_unlock(&global_flag_stru_p->lock);
394 }
395 eth_hdr = rte_pktmbuf_mtod(pkts[i],
396 struct rte_ether_hdr *);
397 ether_type = eth_hdr->ether_type;
398 if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))
399 printf("VLAN tagged frame, offset:");
400 offset = get_vlan_offset(eth_hdr, ðer_type);
401 if (offset > 0)
402 printf("%d\n", offset);
403 if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
404 if (rte_spinlock_trylock(&global_flag_stru_p->lock) == 1) {
405 global_flag_stru_p->port_packets[1]++;
406 rte_spinlock_unlock(&global_flag_stru_p->lock);
407 }
408 arp_hdr = (struct rte_arp_hdr *)(
409 (char *)(eth_hdr + 1) + offset);
410 if (arp_hdr->arp_data.arp_tip == bond_ip) {
411 if (arp_hdr->arp_opcode == rte_cpu_to_be_16(RTE_ARP_OP_REQUEST)) {
412 arp_hdr->arp_opcode = rte_cpu_to_be_16(RTE_ARP_OP_REPLY);
413 /* Switch src and dst data and set bonding MAC */
414 rte_ether_addr_copy(ð_hdr->src_addr, ð_hdr->dst_addr);
415 rte_ether_addr_copy(&bond_mac_addr, ð_hdr->src_addr);
416 rte_ether_addr_copy(&arp_hdr->arp_data.arp_sha,
417 &arp_hdr->arp_data.arp_tha);
418 arp_hdr->arp_data.arp_tip = arp_hdr->arp_data.arp_sip;
419 rte_ether_addr_copy(&bond_mac_addr, &dst_addr);
420 rte_ether_addr_copy(&dst_addr, &arp_hdr->arp_data.arp_sha);
421 arp_hdr->arp_data.arp_sip = bond_ip;
422 rte_eth_tx_burst(BOND_PORT, 0, &pkts[i], 1);
423 is_free = 1;
424 } else {
425 rte_eth_tx_burst(BOND_PORT, 0, NULL, 0);
426 }
427 }
428 } else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
429 if (rte_spinlock_trylock(&global_flag_stru_p->lock) == 1) {
430 global_flag_stru_p->port_packets[2]++;
431 rte_spinlock_unlock(&global_flag_stru_p->lock);
432 }
433 ipv4_hdr = (struct rte_ipv4_hdr *)((char *)(eth_hdr + 1) + offset);
434 if (ipv4_hdr->dst_addr == bond_ip) {
435 rte_ether_addr_copy(ð_hdr->src_addr,
436 ð_hdr->dst_addr);
437 rte_ether_addr_copy(&bond_mac_addr,
438 ð_hdr->src_addr);
439 ipv4_hdr->dst_addr = ipv4_hdr->src_addr;
440 ipv4_hdr->src_addr = bond_ip;
441 rte_eth_tx_burst(BOND_PORT, 0, &pkts[i], 1);
442 }
443
444 }
445
446 /* Free processed packets */
447 if (is_free == 0)
448 rte_pktmbuf_free(pkts[i]);
449 }
450 rte_spinlock_lock(&global_flag_stru_p->lock);
451 }
452 rte_spinlock_unlock(&global_flag_stru_p->lock);
453 printf("BYE lcore_main\n");
454 return 0;
455 }
456
get_string(struct cmd_send_result * res,char * buf,uint8_t size)457 static inline void get_string(struct cmd_send_result *res, char *buf, uint8_t size)
458 {
459 snprintf(buf, size, NIPQUAD_FMT,
460 ((unsigned)((unsigned char *)&(res->ip.addr.ipv4))[0]),
461 ((unsigned)((unsigned char *)&(res->ip.addr.ipv4))[1]),
462 ((unsigned)((unsigned char *)&(res->ip.addr.ipv4))[2]),
463 ((unsigned)((unsigned char *)&(res->ip.addr.ipv4))[3])
464 );
465 }
466 void
cmd_send_parsed(void * parsed_result,__rte_unused struct cmdline * cl,__rte_unused void * data)467 cmd_send_parsed(void *parsed_result, __rte_unused struct cmdline *cl, __rte_unused void *data)
468 {
469
470 struct cmd_send_result *res = parsed_result;
471 char ip_str[INET6_ADDRSTRLEN];
472
473 struct rte_ether_addr bond_mac_addr;
474 struct rte_mbuf *created_pkt;
475 struct rte_ether_hdr *eth_hdr;
476 struct rte_arp_hdr *arp_hdr;
477
478 uint32_t bond_ip;
479 size_t pkt_size;
480 int ret;
481
482 if (res->ip.family == AF_INET)
483 get_string(res, ip_str, INET_ADDRSTRLEN);
484 else
485 cmdline_printf(cl, "Wrong IP format. Only IPv4 is supported\n");
486
487 bond_ip = BOND_IP_1 | (BOND_IP_2 << 8) |
488 (BOND_IP_3 << 16) | (BOND_IP_4 << 24);
489
490 ret = rte_eth_macaddr_get(BOND_PORT, &bond_mac_addr);
491 if (ret != 0) {
492 cmdline_printf(cl,
493 "Failed to get bond (port %u) MAC address: %s\n",
494 BOND_PORT, strerror(-ret));
495 }
496
497 created_pkt = rte_pktmbuf_alloc(mbuf_pool);
498 if (created_pkt == NULL) {
499 cmdline_printf(cl, "Failed to allocate mbuf\n");
500 return;
501 }
502
503 pkt_size = sizeof(struct rte_ether_hdr) + sizeof(struct rte_arp_hdr);
504 created_pkt->data_len = pkt_size;
505 created_pkt->pkt_len = pkt_size;
506
507 eth_hdr = rte_pktmbuf_mtod(created_pkt, struct rte_ether_hdr *);
508 rte_ether_addr_copy(&bond_mac_addr, ð_hdr->src_addr);
509 memset(ð_hdr->dst_addr, 0xFF, RTE_ETHER_ADDR_LEN);
510 eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP);
511
512 arp_hdr = (struct rte_arp_hdr *)(
513 (char *)eth_hdr + sizeof(struct rte_ether_hdr));
514 arp_hdr->arp_hardware = rte_cpu_to_be_16(RTE_ARP_HRD_ETHER);
515 arp_hdr->arp_protocol = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
516 arp_hdr->arp_hlen = RTE_ETHER_ADDR_LEN;
517 arp_hdr->arp_plen = sizeof(uint32_t);
518 arp_hdr->arp_opcode = rte_cpu_to_be_16(RTE_ARP_OP_REQUEST);
519
520 rte_ether_addr_copy(&bond_mac_addr, &arp_hdr->arp_data.arp_sha);
521 arp_hdr->arp_data.arp_sip = bond_ip;
522 memset(&arp_hdr->arp_data.arp_tha, 0, RTE_ETHER_ADDR_LEN);
523 arp_hdr->arp_data.arp_tip =
524 ((unsigned char *)&res->ip.addr.ipv4)[0] |
525 (((unsigned char *)&res->ip.addr.ipv4)[1] << 8) |
526 (((unsigned char *)&res->ip.addr.ipv4)[2] << 16) |
527 (((unsigned char *)&res->ip.addr.ipv4)[3] << 24);
528 rte_eth_tx_burst(BOND_PORT, 0, &created_pkt, 1);
529
530 rte_delay_ms(100);
531 cmdline_printf(cl, "\n");
532 }
533
534 void
cmd_start_parsed(__rte_unused void * parsed_result,struct cmdline * cl,__rte_unused void * data)535 cmd_start_parsed(__rte_unused void *parsed_result, struct cmdline *cl, __rte_unused void *data)
536 {
537 int worker_core_id = rte_lcore_id();
538
539 rte_spinlock_lock(&global_flag_stru_p->lock);
540 if (global_flag_stru_p->LcoreMainIsRunning == 0) {
541 if (rte_eal_get_lcore_state(global_flag_stru_p->LcoreMainCore)
542 != WAIT) {
543 rte_spinlock_unlock(&global_flag_stru_p->lock);
544 return;
545 }
546 rte_spinlock_unlock(&global_flag_stru_p->lock);
547 } else {
548 cmdline_printf(cl, "lcore_main already running on core:%d\n",
549 global_flag_stru_p->LcoreMainCore);
550 rte_spinlock_unlock(&global_flag_stru_p->lock);
551 return;
552 }
553
554 /* start lcore main on core != main_core - ARP response thread */
555 worker_core_id = rte_get_next_lcore(rte_lcore_id(), 1, 0);
556 if ((worker_core_id >= RTE_MAX_LCORE) || (worker_core_id == 0))
557 return;
558
559 rte_spinlock_lock(&global_flag_stru_p->lock);
560 global_flag_stru_p->LcoreMainIsRunning = 1;
561 rte_spinlock_unlock(&global_flag_stru_p->lock);
562 cmdline_printf(cl,
563 "Starting lcore_main on core %d:%d "
564 "Our IP:%d.%d.%d.%d\n",
565 worker_core_id,
566 rte_eal_remote_launch(lcore_main, NULL, worker_core_id),
567 BOND_IP_1,
568 BOND_IP_2,
569 BOND_IP_3,
570 BOND_IP_4
571 );
572 }
573
574 void
cmd_help_parsed(__rte_unused void * parsed_result,struct cmdline * cl,__rte_unused void * data)575 cmd_help_parsed(__rte_unused void *parsed_result, struct cmdline *cl, __rte_unused void *data)
576 {
577 cmdline_printf(cl,
578 "ALB - link bonding mode 6 example\n"
579 "send IP - sends one ARPrequest through bonding for IP.\n"
580 "start - starts listening ARPs.\n"
581 "stop - stops lcore_main.\n"
582 "show - shows some bond info: ex. active members etc.\n"
583 "help - prints help.\n"
584 "quit - terminate all threads and quit.\n"
585 );
586 }
587
588 void
cmd_stop_parsed(__rte_unused void * parsed_result,struct cmdline * cl,__rte_unused void * data)589 cmd_stop_parsed(__rte_unused void *parsed_result, struct cmdline *cl, __rte_unused void *data)
590 {
591 rte_spinlock_lock(&global_flag_stru_p->lock);
592 if (global_flag_stru_p->LcoreMainIsRunning == 0) {
593 cmdline_printf(cl,
594 "lcore_main not running on core:%d\n",
595 global_flag_stru_p->LcoreMainCore);
596 rte_spinlock_unlock(&global_flag_stru_p->lock);
597 return;
598 }
599 global_flag_stru_p->LcoreMainIsRunning = 0;
600 if (rte_eal_wait_lcore(global_flag_stru_p->LcoreMainCore) < 0)
601 cmdline_printf(cl,
602 "error: lcore_main can not stop on core:%d\n",
603 global_flag_stru_p->LcoreMainCore);
604 else
605 cmdline_printf(cl,
606 "lcore_main stopped on core:%d\n",
607 global_flag_stru_p->LcoreMainCore);
608 rte_spinlock_unlock(&global_flag_stru_p->lock);
609 }
610
611 void
cmd_quit_parsed(__rte_unused void * parsed_result,struct cmdline * cl,__rte_unused void * data)612 cmd_quit_parsed(__rte_unused void *parsed_result, struct cmdline *cl, __rte_unused void *data)
613 {
614 rte_spinlock_lock(&global_flag_stru_p->lock);
615 if (global_flag_stru_p->LcoreMainIsRunning == 0) {
616 cmdline_printf(cl,
617 "lcore_main not running on core:%d\n",
618 global_flag_stru_p->LcoreMainCore);
619 rte_spinlock_unlock(&global_flag_stru_p->lock);
620 cmdline_quit(cl);
621 return;
622 }
623 global_flag_stru_p->LcoreMainIsRunning = 0;
624 if (rte_eal_wait_lcore(global_flag_stru_p->LcoreMainCore) < 0)
625 cmdline_printf(cl,
626 "error: lcore_main can not stop on core:%d\n",
627 global_flag_stru_p->LcoreMainCore);
628 else
629 cmdline_printf(cl,
630 "lcore_main stopped on core:%d\n",
631 global_flag_stru_p->LcoreMainCore);
632 rte_spinlock_unlock(&global_flag_stru_p->lock);
633 cmdline_quit(cl);
634 }
635
636 void
cmd_show_parsed(__rte_unused void * parsed_result,struct cmdline * cl,__rte_unused void * data)637 cmd_show_parsed(__rte_unused void *parsed_result, struct cmdline *cl, __rte_unused void *data)
638 {
639 uint16_t members[16] = {0};
640 uint8_t len = 16;
641 struct rte_ether_addr addr;
642 uint16_t i;
643 int ret;
644
645 for (i = 0; i < members_count; i++) {
646 ret = rte_eth_macaddr_get(i, &addr);
647 if (ret != 0) {
648 cmdline_printf(cl,
649 "Failed to get port %u MAC address: %s\n",
650 i, strerror(-ret));
651 continue;
652 }
653
654 PRINT_MAC(addr);
655 printf("\n");
656 }
657
658 rte_spinlock_lock(&global_flag_stru_p->lock);
659 cmdline_printf(cl,
660 "Active_members:%d "
661 "packets received:Tot:%d Arp:%d IPv4:%d\n",
662 rte_eth_bond_active_members_get(BOND_PORT, members, len),
663 global_flag_stru_p->port_packets[0],
664 global_flag_stru_p->port_packets[1],
665 global_flag_stru_p->port_packets[2]);
666 rte_spinlock_unlock(&global_flag_stru_p->lock);
667 }
668
669 /* prompt function, called from main on MAIN lcore */
prompt(__rte_unused void * arg1)670 static void prompt(__rte_unused void *arg1)
671 {
672 struct cmdline *cl;
673
674 cl = cmdline_stdin_new(main_ctx, "bond6>");
675 if (cl != NULL) {
676 cmdline_interact(cl);
677 cmdline_stdin_exit(cl);
678 }
679 }
680
681 /* Main function, does initialisation and calls the per-lcore functions */
682 int
main(int argc,char * argv[])683 main(int argc, char *argv[])
684 {
685 int ret, worker_core_id;
686 uint16_t nb_ports, i;
687
688 /* init EAL */
689 ret = rte_eal_init(argc, argv);
690 rte_devargs_dump(stdout);
691 if (ret < 0)
692 rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
693 argc -= ret;
694 argv += ret;
695
696 nb_ports = rte_eth_dev_count_avail();
697 if (nb_ports == 0)
698 rte_exit(EXIT_FAILURE, "Give at least one port\n");
699 else if (nb_ports > MAX_PORTS)
700 rte_exit(EXIT_FAILURE, "You can have max 4 ports\n");
701
702 mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", NB_MBUF, 32,
703 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
704 if (mbuf_pool == NULL)
705 rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
706
707 /* initialize all ports */
708 members_count = nb_ports;
709 RTE_ETH_FOREACH_DEV(i) {
710 member_port_init(i, mbuf_pool);
711 members[i] = i;
712 }
713
714 bond_port_init(mbuf_pool);
715
716 rte_spinlock_init(&global_flag_stru_p->lock);
717
718 /* check state of lcores */
719 RTE_LCORE_FOREACH_WORKER(worker_core_id) {
720 if (rte_eal_get_lcore_state(worker_core_id) != WAIT)
721 return -EBUSY;
722 }
723
724 /* start lcore main on core != main_core - ARP response thread */
725 worker_core_id = rte_get_next_lcore(rte_lcore_id(), 1, 0);
726 if ((worker_core_id >= RTE_MAX_LCORE) || (worker_core_id == 0))
727 return -EPERM;
728
729 global_flag_stru_p->LcoreMainIsRunning = 1;
730 global_flag_stru_p->LcoreMainCore = worker_core_id;
731 printf("Starting lcore_main on core %d:%d Our IP:%d.%d.%d.%d\n",
732 worker_core_id,
733 rte_eal_remote_launch((lcore_function_t *)lcore_main,
734 NULL,
735 worker_core_id),
736 BOND_IP_1,
737 BOND_IP_2,
738 BOND_IP_3,
739 BOND_IP_4
740 );
741
742 /* Start prompt for user interact */
743 prompt(NULL);
744
745 rte_delay_ms(100);
746
747 /* clean up the EAL */
748 rte_eal_cleanup();
749
750 return 0;
751 }
752