/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>

#define RX_RING_SIZE 1024
#define TX_RING_SIZE 1024

#define NUM_MBUFS 8191
#define MBUF_CACHE_SIZE 250
#define BURST_SIZE 32

static const struct rte_eth_conf port_conf_default = {
    .rxmode = {
        .max_rx_pkt_len = ETHER_MAX_LEN,
        .ignore_offload_bitfield = 1,
    },
};

static struct {
    uint64_t total_cycles;
    uint64_t total_pkts;
} latency_numbers;

/*
 * RX callback: stamp each received mbuf with the current TSC value so the
 * TX callback can later compute how long the packet spent in the app.
 */
static uint16_t
add_timestamps(uint16_t port __rte_unused, uint16_t qidx __rte_unused,
        struct rte_mbuf **pkts, uint16_t nb_pkts,
        uint16_t max_pkts __rte_unused, void *_ __rte_unused)
{
    unsigned int i;
    uint64_t now = rte_rdtsc();

    for (i = 0; i < nb_pkts; i++)
        pkts[i]->udata64 = now;
    return nb_pkts;
}

/*
 * TX callback: accumulate the cycles each packet spent between RX and TX
 * and print the average latency once 100 million packets have been seen.
 */
static uint16_t
calc_latency(uint16_t port __rte_unused, uint16_t qidx __rte_unused,
        struct rte_mbuf **pkts, uint16_t nb_pkts, void *_ __rte_unused)
{
    uint64_t cycles = 0;
    uint64_t now = rte_rdtsc();
    unsigned int i;

    for (i = 0; i < nb_pkts; i++)
        cycles += now - pkts[i]->udata64;
    latency_numbers.total_cycles += cycles;
    latency_numbers.total_pkts += nb_pkts;

    if (latency_numbers.total_pkts > (100 * 1000 * 1000ULL)) {
        printf("Latency = %"PRIu64" cycles\n",
            latency_numbers.total_cycles / latency_numbers.total_pkts);
        latency_numbers.total_cycles = latency_numbers.total_pkts = 0;
    }
    return nb_pkts;
}

/*
 * Initialises a given port using global settings, with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 */
static inline int
port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
    struct rte_eth_conf port_conf = port_conf_default;
    const uint16_t rx_rings = 1, tx_rings = 1;
    uint16_t nb_rxd = RX_RING_SIZE;
    uint16_t nb_txd = TX_RING_SIZE;
    int retval;
    uint16_t q;
    struct rte_eth_dev_info dev_info;
    struct rte_eth_txconf txconf;

    if (!rte_eth_dev_is_valid_port(port))
        return -1;

    rte_eth_dev_info_get(port, &dev_info);
    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
        port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;

    retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
    if (retval != 0)
        return retval;

    retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
    if (retval != 0)
        return retval;

    for (q = 0; q < rx_rings; q++) {
        retval = rte_eth_rx_queue_setup(port, q, nb_rxd,
                rte_eth_dev_socket_id(port), NULL, mbuf_pool);
        if (retval < 0)
            return retval;
    }

    txconf = dev_info.default_txconf;
    txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
    txconf.offloads = port_conf.txmode.offloads;
    for (q = 0; q < tx_rings; q++) {
        retval = rte_eth_tx_queue_setup(port, q, nb_txd,
                rte_eth_dev_socket_id(port), &txconf);
        if (retval < 0)
            return retval;
    }

    retval = rte_eth_dev_start(port);
    if (retval < 0)
        return retval;

    struct ether_addr addr;

    rte_eth_macaddr_get(port, &addr);
    printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
            " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
            (unsigned int)port,
            addr.addr_bytes[0], addr.addr_bytes[1],
            addr.addr_bytes[2], addr.addr_bytes[3],
            addr.addr_bytes[4], addr.addr_bytes[5]);

    rte_eth_promiscuous_enable(port);

    /* Register the latency-measurement callbacks on queue 0 of this port. */
    rte_eth_add_rx_callback(port, 0, add_timestamps, NULL);
    rte_eth_add_tx_callback(port, 0, calc_latency, NULL);

    return 0;
}

/*
 * Main thread that does the work: read a burst from each port and forward
 * it to the paired port (port 0 <-> port 1, port 2 <-> port 3, ...).
 */
static __attribute__((noreturn)) void
lcore_main(void)
{
    uint16_t port;

    RTE_ETH_FOREACH_DEV(port)
        if (rte_eth_dev_socket_id(port) > 0 &&
                rte_eth_dev_socket_id(port) != (int)rte_socket_id())
            printf("WARNING, port %u is on remote NUMA node to "
                    "polling thread.\n\tPerformance will "
                    "not be optimal.\n", port);

    printf("\nCore %u forwarding packets. [Ctrl+C to quit]\n",
            rte_lcore_id());
    for (;;) {
        RTE_ETH_FOREACH_DEV(port) {
            struct rte_mbuf *bufs[BURST_SIZE];
            const uint16_t nb_rx = rte_eth_rx_burst(port, 0,
                    bufs, BURST_SIZE);

            if (unlikely(nb_rx == 0))
                continue;

            /* Transmit on the paired port (port ^ 1). */
            const uint16_t nb_tx = rte_eth_tx_burst(port ^ 1, 0,
                    bufs, nb_rx);

            /* Free any mbufs that could not be transmitted. */
            if (unlikely(nb_tx < nb_rx)) {
                uint16_t buf;

                for (buf = nb_tx; buf < nb_rx; buf++)
                    rte_pktmbuf_free(bufs[buf]);
            }
        }
    }
}

/* Main function, does initialisation and calls the per-lcore functions. */
int
main(int argc, char *argv[])
{
    struct rte_mempool *mbuf_pool;
    uint16_t nb_ports;
    uint16_t portid;

    /* init EAL */
    int ret = rte_eal_init(argc, argv);

    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
    argc -= ret;
    argv += ret;

    nb_ports = rte_eth_dev_count_avail();
    if (nb_ports < 2 || (nb_ports & 1))
        rte_exit(EXIT_FAILURE, "Error: number of ports must be even\n");

    mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
        NUM_MBUFS * nb_ports, MBUF_CACHE_SIZE, 0,
        RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
    if (mbuf_pool == NULL)
        rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

    /* initialize all ports */
    RTE_ETH_FOREACH_DEV(portid)
        if (port_init(portid, mbuf_pool) != 0)
            rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu16"\n",
                    portid);

    if (rte_lcore_count() > 1)
        printf("\nWARNING: Too many lcores enabled. Only 1 used.\n");

    /* call lcore_main on the master core only */
    lcore_main();
    return 0;
}
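/*
 * Usage sketch (illustrative, not part of the listing above; the binary
 * name, build method, and core/channel counts below are assumptions):
 * with an older Makefile-based DPDK tree, export RTE_SDK and RTE_TARGET,
 * run `make` in the example directory, bind an even number of ports to a
 * DPDK-compatible driver, and launch the app with EAL arguments, e.g.
 *
 *     ./build/rxtx_callbacks -l 1 -n 4
 *
 * where -l gives the lcore to poll on and -n the number of memory
 * channels. The average latency line is printed by the TX callback after
 * every 100 million forwarded packets.
 */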