/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>

#define RX_RING_SIZE 1024
#define TX_RING_SIZE 1024

#define NUM_MBUFS 8191
#define MBUF_CACHE_SIZE 250
#define BURST_SIZE 32

static const struct rte_eth_conf port_conf_default = {
	.rxmode = {
		.max_rx_pkt_len = ETHER_MAX_LEN,
	},
};

static struct {
	uint64_t total_cycles;
	uint64_t total_pkts;
} latency_numbers;

/* RX callback: stamp every received mbuf with the current TSC value */
static uint16_t
add_timestamps(uint16_t port __rte_unused, uint16_t qidx __rte_unused,
		struct rte_mbuf **pkts, uint16_t nb_pkts,
		uint16_t max_pkts __rte_unused, void *_ __rte_unused)
{
	unsigned i;
	uint64_t now = rte_rdtsc();

	for (i = 0; i < nb_pkts; i++)
		pkts[i]->udata64 = now;
	return nb_pkts;
}

/* TX callback: accumulate RX-to-TX cycle counts and periodically print
 * the average latency per packet.
 */
static uint16_t
calc_latency(uint16_t port __rte_unused, uint16_t qidx __rte_unused,
		struct rte_mbuf **pkts, uint16_t nb_pkts, void *_ __rte_unused)
{
	uint64_t cycles = 0;
	uint64_t now = rte_rdtsc();
	unsigned i;

	for (i = 0; i < nb_pkts; i++)
		cycles += now - pkts[i]->udata64;
	latency_numbers.total_cycles += cycles;
	latency_numbers.total_pkts += nb_pkts;

	if (latency_numbers.total_pkts > (100 * 1000 * 1000ULL)) {
		printf("Latency = %"PRIu64" cycles\n",
				latency_numbers.total_cycles /
				latency_numbers.total_pkts);
		latency_numbers.total_cycles = latency_numbers.total_pkts = 0;
	}
	return nb_pkts;
}

/*
 * Initialises a given port using global settings, with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 */
static inline int
port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_conf port_conf = port_conf_default;
	const uint16_t rx_rings = 1, tx_rings = 1;
	uint16_t nb_rxd = RX_RING_SIZE;
	uint16_t nb_txd = TX_RING_SIZE;
	int retval;
	uint16_t q;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;

	if (!rte_eth_dev_is_valid_port(port))
		return -1;

	rte_eth_dev_info_get(port, &dev_info);
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
	if (retval != 0)
		return retval;

	retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
	if (retval != 0)
		return retval;

	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, nb_rxd,
				rte_eth_dev_socket_id(port), NULL, mbuf_pool);
		if (retval < 0)
			return retval;
	}

	txconf = dev_info.default_txconf;
	txconf.offloads = port_conf.txmode.offloads;
	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, nb_txd,
				rte_eth_dev_socket_id(port), &txconf);
		if (retval < 0)
			return retval;
	}

	retval = rte_eth_dev_start(port);
	if (retval < 0)
		return retval;

	struct ether_addr addr;

	rte_eth_macaddr_get(port, &addr);
	printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
			(unsigned)port,
			addr.addr_bytes[0], addr.addr_bytes[1],
			addr.addr_bytes[2], addr.addr_bytes[3],
			addr.addr_bytes[4], addr.addr_bytes[5]);

	rte_eth_promiscuous_enable(port);
	rte_eth_add_rx_callback(port, 0, add_timestamps, NULL);
	rte_eth_add_tx_callback(port, 0, calc_latency, NULL);

	return 0;
}

/*
 * Main thread that does the work: reads a burst of packets from each port
 * and forwards it to the paired port (0<->1, 2<->3, ...).
 */
static __attribute__((noreturn)) void
lcore_main(void)
{
	uint16_t port;

	RTE_ETH_FOREACH_DEV(port)
		if (rte_eth_dev_socket_id(port) >= 0 &&
				rte_eth_dev_socket_id(port) !=
						(int)rte_socket_id())
			printf("WARNING, port %u is on remote NUMA node to "
					"polling thread.\n\tPerformance will "
					"not be optimal.\n", port);

	printf("\nCore %u forwarding packets. [Ctrl+C to quit]\n",
			rte_lcore_id());
	for (;;) {
		RTE_ETH_FOREACH_DEV(port) {
			struct rte_mbuf *bufs[BURST_SIZE];
			const uint16_t nb_rx = rte_eth_rx_burst(port, 0,
					bufs, BURST_SIZE);
			if (unlikely(nb_rx == 0))
				continue;
			const uint16_t nb_tx = rte_eth_tx_burst(port ^ 1, 0,
					bufs, nb_rx);
			/* Free any mbufs that were not transmitted */
			if (unlikely(nb_tx < nb_rx)) {
				uint16_t buf;

				for (buf = nb_tx; buf < nb_rx; buf++)
					rte_pktmbuf_free(bufs[buf]);
			}
		}
	}
}

/* Main function, does initialisation and calls the per-lcore functions */
int
main(int argc, char *argv[])
{
	struct rte_mempool *mbuf_pool;
	uint16_t nb_ports;
	uint16_t portid;

	/* init EAL */
	int ret = rte_eal_init(argc, argv);

	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
	argc -= ret;
	argv += ret;

	nb_ports = rte_eth_dev_count_avail();
	if (nb_ports < 2 || (nb_ports & 1))
		rte_exit(EXIT_FAILURE,
				"Error: number of ports must be even and at least 2\n");

	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
			NUM_MBUFS * nb_ports, MBUF_CACHE_SIZE, 0,
			RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

	/* initialize all ports */
	RTE_ETH_FOREACH_DEV(portid)
		if (port_init(portid, mbuf_pool) != 0)
			rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu16"\n",
					portid);

	if (rte_lcore_count() > 1)
		printf("\nWARNING: Too many lcores enabled. Only 1 used.\n");

	/* call lcore_main on master core only */
	lcore_main();
	return 0;
}