/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <getopt.h>
#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>

#define RX_RING_SIZE 1024
#define TX_RING_SIZE 1024

#define NUM_MBUFS 8191
#define MBUF_CACHE_SIZE 250
#define BURST_SIZE 32

static int hwts_dynfield_offset = -1;

static inline rte_mbuf_timestamp_t *
hwts_field(struct rte_mbuf *mbuf)
{
        return RTE_MBUF_DYNFIELD(mbuf,
                        hwts_dynfield_offset, rte_mbuf_timestamp_t *);
}

typedef uint64_t tsc_t;
static int tsc_dynfield_offset = -1;

static inline tsc_t *
tsc_field(struct rte_mbuf *mbuf)
{
        return RTE_MBUF_DYNFIELD(mbuf, tsc_dynfield_offset, tsc_t *);
}

static const char usage[] =
        "%s EAL_ARGS -- [-t]\n";

static struct {
        uint64_t total_cycles;
        uint64_t total_queue_cycles;
        uint64_t total_pkts;
} latency_numbers;

int hw_timestamping;

#define TICKS_PER_CYCLE_SHIFT 16
static uint64_t ticks_per_cycle_mult;

/* Callback added to the RX port and applied to packets. 8< */
static uint16_t
add_timestamps(uint16_t port __rte_unused, uint16_t qidx __rte_unused,
                struct rte_mbuf **pkts, uint16_t nb_pkts,
                uint16_t max_pkts __rte_unused, void *_ __rte_unused)
{
        unsigned int i;
        uint64_t now = rte_rdtsc();

        for (i = 0; i < nb_pkts; i++)
                *tsc_field(pkts[i]) = now;
        return nb_pkts;
}
/* >8 End of callback addition and application. */

/* Callback is added to the TX port. 8< */
static uint16_t
calc_latency(uint16_t port, uint16_t qidx __rte_unused,
                struct rte_mbuf **pkts, uint16_t nb_pkts, void *_ __rte_unused)
{
        uint64_t cycles = 0;
        uint64_t queue_ticks = 0;
        uint64_t now = rte_rdtsc();
        uint64_t ticks;
        unsigned int i;

        if (hw_timestamping)
                rte_eth_read_clock(port, &ticks);

        for (i = 0; i < nb_pkts; i++) {
                cycles += now - *tsc_field(pkts[i]);
                if (hw_timestamping)
                        queue_ticks += ticks - *hwts_field(pkts[i]);
        }

        latency_numbers.total_cycles += cycles;
        if (hw_timestamping)
                latency_numbers.total_queue_cycles += (queue_ticks
                        * ticks_per_cycle_mult) >> TICKS_PER_CYCLE_SHIFT;

        latency_numbers.total_pkts += nb_pkts;

        if (latency_numbers.total_pkts > (100 * 1000 * 1000ULL)) {
                printf("Latency = %"PRIu64" cycles\n",
                        latency_numbers.total_cycles / latency_numbers.total_pkts);
                if (hw_timestamping) {
                        printf("Latency from HW = %"PRIu64" cycles\n",
                                latency_numbers.total_queue_cycles
                                / latency_numbers.total_pkts);
                }
                latency_numbers.total_cycles = 0;
                latency_numbers.total_queue_cycles = 0;
                latency_numbers.total_pkts = 0;
        }
        return nb_pkts;
}
/* >8 End of callback addition. */
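
/*
 * Illustrative helper, not part of the original example: the latency
 * figures above are reported in TSC cycles. A sketch like this could
 * convert a cycle count into nanoseconds using the calibrated TSC
 * frequency returned by rte_get_tsc_hz().
 */
static inline double
cycles_to_ns(uint64_t cycles)
{
        return (double)cycles * 1E9 /* ns per second */ / rte_get_tsc_hz();
}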

/*
 * Initialises a given port using global settings, with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 */

/* Port initialization. 8< */
static inline int
port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
        struct rte_eth_conf port_conf;
        const uint16_t rx_rings = 1, tx_rings = 1;
        uint16_t nb_rxd = RX_RING_SIZE;
        uint16_t nb_txd = TX_RING_SIZE;
        int retval;
        uint16_t q;
        struct rte_eth_dev_info dev_info;
        struct rte_eth_rxconf rxconf;
        struct rte_eth_txconf txconf;

        if (!rte_eth_dev_is_valid_port(port))
                return -1;

        memset(&port_conf, 0, sizeof(struct rte_eth_conf));

        retval = rte_eth_dev_info_get(port, &dev_info);
        if (retval != 0) {
                printf("Error getting device (port %u) info: %s\n",
                        port, strerror(-retval));
                return retval;
        }

        if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
                port_conf.txmode.offloads |=
                        RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;

        if (hw_timestamping) {
                if (!(dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) {
                        printf("\nERROR: Port %u does not support hardware timestamping\n",
                                port);
                        return -1;
                }
                port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
                rte_mbuf_dyn_rx_timestamp_register(&hwts_dynfield_offset, NULL);
                if (hwts_dynfield_offset < 0) {
                        printf("ERROR: Failed to register timestamp field\n");
                        return -rte_errno;
                }
        }

        retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
        if (retval != 0)
                return retval;

        retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
        if (retval != 0)
                return retval;

        rxconf = dev_info.default_rxconf;

        for (q = 0; q < rx_rings; q++) {
                retval = rte_eth_rx_queue_setup(port, q, nb_rxd,
                                rte_eth_dev_socket_id(port), &rxconf, mbuf_pool);
                if (retval < 0)
                        return retval;
        }

        txconf = dev_info.default_txconf;
        txconf.offloads = port_conf.txmode.offloads;
        for (q = 0; q < tx_rings; q++) {
                retval = rte_eth_tx_queue_setup(port, q, nb_txd,
                                rte_eth_dev_socket_id(port), &txconf);
                if (retval < 0)
                        return retval;
        }

        retval = rte_eth_dev_start(port);
        if (retval < 0)
                return retval;

        if (hw_timestamping && ticks_per_cycle_mult == 0) {
                uint64_t cycles_base = rte_rdtsc();
                uint64_t ticks_base;

                retval = rte_eth_read_clock(port, &ticks_base);
                if (retval != 0)
                        return retval;
                rte_delay_ms(100);

                uint64_t cycles = rte_rdtsc();
                uint64_t ticks;
                rte_eth_read_clock(port, &ticks);

                uint64_t c_freq = cycles - cycles_base;
                uint64_t t_freq = ticks - ticks_base;
                double freq_mult = (double)c_freq / t_freq;

                printf("TSC Freq ~= %" PRIu64
                                "\nHW Freq ~= %" PRIu64
                                "\nRatio : %f\n",
                                c_freq * 10, t_freq * 10, freq_mult);
                /* The TSC is faster than the device clock, so freq_mult is > 1.
                 * Convert the scaling factor into an integer multiply and shift.
                 */
                ticks_per_cycle_mult = (1 << TICKS_PER_CYCLE_SHIFT) / freq_mult;
        }

        struct rte_ether_addr addr;

        retval = rte_eth_macaddr_get(port, &addr);
        if (retval < 0) {
                printf("Failed to get MAC address on port %u: %s\n",
                        port, rte_strerror(-retval));
                return retval;
        }
        printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
                        " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
                        (unsigned)port,
                        RTE_ETHER_ADDR_BYTES(&addr));

        retval = rte_eth_promiscuous_enable(port);
        if (retval != 0)
                return retval;
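
        /*
         * Note: both registration calls below return an opaque callback
         * handle (NULL on failure, with rte_errno set). This example keeps
         * the callbacks for the lifetime of the port and therefore ignores
         * the handles; an application that needed to detach them could store
         * the returned pointers and later pass them to
         * rte_eth_remove_rx_callback() / rte_eth_remove_tx_callback().
         */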

        /* RX and TX callbacks are added to the ports. 8< */
        rte_eth_add_rx_callback(port, 0, add_timestamps, NULL);
        rte_eth_add_tx_callback(port, 0, calc_latency, NULL);
        /* >8 End of RX and TX callbacks. */

        return 0;
}
/* >8 End of port initialization. */

/*
 * Main thread that does the work, reading packets from one port and
 * writing them to the paired port (port ^ 1).
 */
static __rte_noreturn void
lcore_main(void)
{
        uint16_t port;

        RTE_ETH_FOREACH_DEV(port)
                if (rte_eth_dev_socket_id(port) > 0 &&
                                rte_eth_dev_socket_id(port) !=
                                                (int)rte_socket_id())
                        printf("WARNING, port %u is on a remote NUMA node "
                                "relative to the polling thread.\n"
                                "\tPerformance will not be optimal.\n", port);

        printf("\nCore %u forwarding packets. [Ctrl+C to quit]\n",
                        rte_lcore_id());
        for (;;) {
                RTE_ETH_FOREACH_DEV(port) {
                        struct rte_mbuf *bufs[BURST_SIZE];
                        const uint16_t nb_rx = rte_eth_rx_burst(port, 0,
                                        bufs, BURST_SIZE);
                        if (unlikely(nb_rx == 0))
                                continue;
                        const uint16_t nb_tx = rte_eth_tx_burst(port ^ 1, 0,
                                        bufs, nb_rx);
                        if (unlikely(nb_tx < nb_rx)) {
                                uint16_t buf;

                                for (buf = nb_tx; buf < nb_rx; buf++)
                                        rte_pktmbuf_free(bufs[buf]);
                        }
                }
        }
}

/* Main function, does initialisation and calls the per-lcore functions */
int
main(int argc, char *argv[])
{
        struct rte_mempool *mbuf_pool;
        uint16_t nb_ports;
        uint16_t portid;
        struct option lgopts[] = {
                { NULL, 0, 0, 0 }
        };
        int opt, option_index;

        static const struct rte_mbuf_dynfield tsc_dynfield_desc = {
                .name = "example_bbdev_dynfield_tsc",
                .size = sizeof(tsc_t),
                .align = __alignof__(tsc_t),
        };

        /* init EAL */
        int ret = rte_eal_init(argc, argv);

        if (ret < 0)
                rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
        argc -= ret;
        argv += ret;

        while ((opt = getopt_long(argc, argv, "t", lgopts, &option_index))
                        != EOF)
                switch (opt) {
                case 't':
                        hw_timestamping = 1;
                        break;
                default:
                        printf(usage, argv[0]);
                        return -1;
                }
        optind = 1; /* reset getopt lib */

        nb_ports = rte_eth_dev_count_avail();
        if (nb_ports < 2 || (nb_ports & 1))
                rte_exit(EXIT_FAILURE, "Error: number of ports must be even\n");

        mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
                        NUM_MBUFS * nb_ports, MBUF_CACHE_SIZE, 0,
                        RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
        if (mbuf_pool == NULL)
                rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

        tsc_dynfield_offset =
                rte_mbuf_dynfield_register(&tsc_dynfield_desc);
        if (tsc_dynfield_offset < 0)
                rte_exit(EXIT_FAILURE, "Cannot register mbuf field\n");

        /* initialize all ports */
        RTE_ETH_FOREACH_DEV(portid)
                if (port_init(portid, mbuf_pool) != 0)
                        rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu16"\n",
                                        portid);

        if (rte_lcore_count() > 1)
                printf("\nWARNING: Too many lcores enabled. "
                        "Only 1 is used by this application.\n");

        /* call lcore_main on main core only */
        lcore_main();

        /* clean up the EAL */
        rte_eal_cleanup();

        return 0;
}
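
/*
 * Illustrative invocation (the binary name and the EAL core/device
 * arguments depend on how the example is built and on the target system):
 *
 *     ./dpdk-rxtx_callbacks -l 1 -- -t
 *
 * Everything before "--" is consumed by rte_eal_init(); the optional -t
 * flag enables hardware RX timestamping on ports that advertise
 * RTE_ETH_RX_OFFLOAD_TIMESTAMP.
 */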