/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <getopt.h>
#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>

#define RX_RING_SIZE 1024
#define TX_RING_SIZE 1024

#define NUM_MBUFS 8191
#define MBUF_CACHE_SIZE 250
#define BURST_SIZE 32

static const char usage[] =
	"%s EAL_ARGS -- [-t]\n";

static const struct rte_eth_conf port_conf_default = {
	.rxmode = {
		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
	},
};

static struct {
	uint64_t total_cycles;
	uint64_t total_queue_cycles;
	uint64_t total_pkts;
} latency_numbers;

int hw_timestamping;

#define TICKS_PER_CYCLE_SHIFT 16
static uint64_t ticks_per_cycle_mult;

/* RX callback: stamp each received mbuf with the current TSC value */
static uint16_t
add_timestamps(uint16_t port __rte_unused, uint16_t qidx __rte_unused,
		struct rte_mbuf **pkts, uint16_t nb_pkts,
		uint16_t max_pkts __rte_unused, void *_ __rte_unused)
{
	unsigned i;
	uint64_t now = rte_rdtsc();

	for (i = 0; i < nb_pkts; i++)
		pkts[i]->udata64 = now;
	return nb_pkts;
}

/*
 * TX callback: accumulate the cycles each packet spent inside the app
 * (and, with -t, inside the RX queue) and periodically print the average.
 */
static uint16_t
calc_latency(uint16_t port, uint16_t qidx __rte_unused,
		struct rte_mbuf **pkts, uint16_t nb_pkts, void *_ __rte_unused)
{
	uint64_t cycles = 0;
	uint64_t queue_ticks = 0;
	uint64_t now = rte_rdtsc();
	uint64_t ticks = 0;
	unsigned i;

	if (hw_timestamping)
		rte_eth_read_clock(port, &ticks);

	for (i = 0; i < nb_pkts; i++) {
		cycles += now - pkts[i]->udata64;
		if (hw_timestamping)
			queue_ticks += ticks - pkts[i]->timestamp;
	}

	latency_numbers.total_cycles += cycles;
	if (hw_timestamping)
		latency_numbers.total_queue_cycles += (queue_ticks
			* ticks_per_cycle_mult) >> TICKS_PER_CYCLE_SHIFT;

	latency_numbers.total_pkts += nb_pkts;

	if (latency_numbers.total_pkts > (100 * 1000 * 1000ULL)) {
		printf("Latency = %"PRIu64" cycles\n",
			latency_numbers.total_cycles /
			latency_numbers.total_pkts);
		if (hw_timestamping) {
			printf("Latency from HW = %"PRIu64" cycles\n",
				latency_numbers.total_queue_cycles /
				latency_numbers.total_pkts);
		}
		latency_numbers.total_cycles = 0;
		latency_numbers.total_queue_cycles = 0;
		latency_numbers.total_pkts = 0;
	}
	return nb_pkts;
}

/*
 * Initialises a given port using global settings and with the rx buffers
 * coming from the mbuf_pool passed as parameter
 */
static inline int
port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_conf port_conf = port_conf_default;
	const uint16_t rx_rings = 1, tx_rings = 1;
	uint16_t nb_rxd = RX_RING_SIZE;
	uint16_t nb_txd = TX_RING_SIZE;
	int retval;
	uint16_t q;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxconf;
	struct rte_eth_txconf txconf;

	if (!rte_eth_dev_is_valid_port(port))
		return -1;

	rte_eth_dev_info_get(port, &dev_info);
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
	if (retval != 0)
		return retval;

	retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
	if (retval != 0)
		return retval;

	rxconf = dev_info.default_rxconf;

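	/*
	 * With -t, verify the port can timestamp packets in hardware and
	 * request the offload; calc_latency() then also reports latency
	 * measured against the NIC's own clock.
	 */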
	if (hw_timestamping) {
		if (!(dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP)) {
			printf("\nERROR: Port %u does not support hardware timestamping\n",
					port);
			return -1;
		}
		rxconf.offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
	}

	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, nb_rxd,
				rte_eth_dev_socket_id(port), &rxconf, mbuf_pool);
		if (retval < 0)
			return retval;
	}

	txconf = dev_info.default_txconf;
	txconf.offloads = port_conf.txmode.offloads;
	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, nb_txd,
				rte_eth_dev_socket_id(port), &txconf);
		if (retval < 0)
			return retval;
	}

	retval = rte_eth_dev_start(port);
	if (retval < 0)
		return retval;

	if (hw_timestamping && ticks_per_cycle_mult == 0) {
		uint64_t cycles_base = rte_rdtsc();
		uint64_t ticks_base;
		retval = rte_eth_read_clock(port, &ticks_base);
		if (retval != 0)
			return retval;
		rte_delay_ms(100);
		uint64_t cycles = rte_rdtsc();
		uint64_t ticks;
		rte_eth_read_clock(port, &ticks);
		uint64_t c_freq = cycles - cycles_base;
		uint64_t t_freq = ticks - ticks_base;
		double freq_mult = (double)c_freq / t_freq;
		printf("TSC Freq ~= %" PRIu64
				"\nHW Freq ~= %" PRIu64
				"\nRatio : %f\n",
				c_freq * 10, t_freq * 10, freq_mult);
		/* TSC will be faster than the internal ticks, so freq_mult is > 1.
		 * Convert the multiplication into an integer shift & multiply.
		 */
		ticks_per_cycle_mult = (1 << TICKS_PER_CYCLE_SHIFT) / freq_mult;
	}

	struct rte_ether_addr addr;

	rte_eth_macaddr_get(port, &addr);
	printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
			(unsigned)port,
			addr.addr_bytes[0], addr.addr_bytes[1],
			addr.addr_bytes[2], addr.addr_bytes[3],
			addr.addr_bytes[4], addr.addr_bytes[5]);

	rte_eth_promiscuous_enable(port);
	rte_eth_add_rx_callback(port, 0, add_timestamps, NULL);
	rte_eth_add_tx_callback(port, 0, calc_latency, NULL);

	return 0;
}

/*
 * Main thread that does the work, reading from an RX port and writing
 * to the paired TX port (port ^ 1)
 */
static __attribute__((noreturn)) void
lcore_main(void)
{
	uint16_t port;

	RTE_ETH_FOREACH_DEV(port)
		if (rte_eth_dev_socket_id(port) >= 0 &&
				rte_eth_dev_socket_id(port) !=
						(int)rte_socket_id())
			printf("WARNING, port %u is on remote NUMA node to "
					"polling thread.\n\tPerformance will "
					"not be optimal.\n", port);

	printf("\nCore %u forwarding packets. [Ctrl+C to quit]\n",
			rte_lcore_id());
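	/*
	 * Run until terminated: poll each port for a burst of packets,
	 * transmit the burst on the paired port (0<->1, 2<->3, ...) and free
	 * any mbufs that the TX burst could not queue.
	 */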
	for (;;) {
		RTE_ETH_FOREACH_DEV(port) {
			struct rte_mbuf *bufs[BURST_SIZE];
			const uint16_t nb_rx = rte_eth_rx_burst(port, 0,
					bufs, BURST_SIZE);
			if (unlikely(nb_rx == 0))
				continue;
			const uint16_t nb_tx = rte_eth_tx_burst(port ^ 1, 0,
					bufs, nb_rx);
			if (unlikely(nb_tx < nb_rx)) {
				uint16_t buf;

				for (buf = nb_tx; buf < nb_rx; buf++)
					rte_pktmbuf_free(bufs[buf]);
			}
		}
	}
}

/* Main function, does initialisation and calls the per-lcore functions */
int
main(int argc, char *argv[])
{
	struct rte_mempool *mbuf_pool;
	uint16_t nb_ports;
	uint16_t portid;
	struct option lgopts[] = {
		{ NULL, 0, 0, 0 }
	};
	int opt, option_index;

	/* init EAL */
	int ret = rte_eal_init(argc, argv);

	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
	argc -= ret;
	argv += ret;

	while ((opt = getopt_long(argc, argv, "t", lgopts, &option_index))
			!= EOF)
		switch (opt) {
		case 't':
			hw_timestamping = 1;
			break;
		default:
			printf(usage, argv[0]);
			return -1;
		}
	optind = 1; /* reset getopt lib */

	nb_ports = rte_eth_dev_count_avail();
	if (nb_ports < 2 || (nb_ports & 1))
		rte_exit(EXIT_FAILURE, "Error: number of ports must be even\n");

	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
			NUM_MBUFS * nb_ports, MBUF_CACHE_SIZE, 0,
			RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

	/* initialize all ports */
	RTE_ETH_FOREACH_DEV(portid)
		if (port_init(portid, mbuf_pool) != 0)
			rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu16"\n",
					portid);

	if (rte_lcore_count() > 1)
		printf("\nWARNING: Too many lcores enabled. Only 1 used.\n");

	/* call lcore_main on master core only */
	lcore_main();
	return 0;
}