/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#include <stdio.h>
#include <stdlib.h>

#include <rte_common.h>
#include <rte_spinlock.h>
#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>

#include "ethapp.h"

#define MAX_PORTS RTE_MAX_ETHPORTS
#define MAX_BURST_LENGTH 32
#define PORT_RX_QUEUE_SIZE 128
#define PORT_TX_QUEUE_SIZE 256
#define PKTPOOL_EXTRA_SIZE 512
#define PKTPOOL_CACHE 32


struct txq_port {
	uint16_t cnt_unsent;
	struct rte_mbuf *buf_frames[MAX_BURST_LENGTH];
};

struct app_port {
	struct ether_addr mac_addr;
	struct txq_port txq;
	rte_spinlock_t lock;
	int port_active;
	int port_dirty;
	int idx_port;
	struct rte_mempool *pkt_pool;
};

struct app_config {
	struct app_port ports[MAX_PORTS];
	int cnt_ports;
	int exit_now;
};


struct app_config app_cfg;


void lock_port(int idx_port)
{
	struct app_port *ptr_port = &app_cfg.ports[idx_port];

	rte_spinlock_lock(&ptr_port->lock);
}

void unlock_port(int idx_port)
{
	struct app_port *ptr_port = &app_cfg.ports[idx_port];

	rte_spinlock_unlock(&ptr_port->lock);
}

void mark_port_active(int idx_port)
{
	struct app_port *ptr_port = &app_cfg.ports[idx_port];

	ptr_port->port_active = 1;
}

void mark_port_inactive(int idx_port)
{
	struct app_port *ptr_port = &app_cfg.ports[idx_port];

	ptr_port->port_active = 0;
}

void mark_port_newmac(int idx_port)
{
	struct app_port *ptr_port = &app_cfg.ports[idx_port];

	ptr_port->port_dirty = 1;
}

static void setup_ports(struct app_config *app_cfg, int cnt_ports)
{
	int idx_port;
	int size_pktpool;
	struct rte_eth_conf cfg_port;
	struct rte_eth_dev_info dev_info;
	char str_name[16];

	memset(&cfg_port, 0, sizeof(cfg_port));
	cfg_port.txmode.mq_mode = ETH_MQ_TX_NONE;

	for (idx_port = 0; idx_port < cnt_ports; idx_port++) {
		struct app_port *ptr_port = &app_cfg->ports[idx_port];

		rte_eth_dev_info_get(idx_port, &dev_info);
		size_pktpool = dev_info.rx_desc_lim.nb_max +
			dev_info.tx_desc_lim.nb_max + PKTPOOL_EXTRA_SIZE;

		snprintf(str_name, 16, "pkt_pool%i", idx_port);
		ptr_port->pkt_pool = rte_pktmbuf_pool_create(
			str_name,
			size_pktpool, PKTPOOL_CACHE,
			0,
			RTE_MBUF_DEFAULT_BUF_SIZE,
			rte_socket_id()
			);
		if (ptr_port->pkt_pool == NULL)
			rte_exit(EXIT_FAILURE,
				"rte_pktmbuf_pool_create failed"
				);

		printf("Init port %i..\n", idx_port);
		ptr_port->port_active = 1;
		ptr_port->port_dirty = 0;
		ptr_port->idx_port = idx_port;

		if (rte_eth_dev_configure(idx_port, 1, 1, &cfg_port) < 0)
			rte_exit(EXIT_FAILURE,
				 "rte_eth_dev_configure failed");
		if (rte_eth_rx_queue_setup(
			    idx_port, 0, PORT_RX_QUEUE_SIZE,
			    rte_eth_dev_socket_id(idx_port), NULL,
			    ptr_port->pkt_pool) < 0)
			rte_exit(EXIT_FAILURE,
				 "rte_eth_rx_queue_setup failed"
				 );
		if (rte_eth_tx_queue_setup(
			    idx_port, 0, PORT_TX_QUEUE_SIZE,
			    rte_eth_dev_socket_id(idx_port), NULL) < 0)
			rte_exit(EXIT_FAILURE,
				 "rte_eth_tx_queue_setup failed"
				 );
		if (rte_eth_dev_start(idx_port) < 0)
			rte_exit(EXIT_FAILURE,
				 "%s:%i: rte_eth_dev_start failed",
				 __FILE__, __LINE__
				 );
		rte_eth_macaddr_get(idx_port, &ptr_port->mac_addr);
		rte_spinlock_init(&ptr_port->lock);
	}
}

static void process_frame(struct app_port *ptr_port,
	struct rte_mbuf *ptr_frame)
{
	struct ether_hdr *ptr_mac_hdr;

	ptr_mac_hdr = rte_pktmbuf_mtod(ptr_frame, struct ether_hdr *);
	ether_addr_copy(&ptr_mac_hdr->s_addr, &ptr_mac_hdr->d_addr);
	ether_addr_copy(&ptr_port->mac_addr, &ptr_mac_hdr->s_addr);
}

static int slave_main(__attribute__((unused)) void *ptr_data)
{
	struct app_port *ptr_port;
	struct rte_mbuf *ptr_frame;
	struct txq_port *txq;

	uint16_t cnt_recv_frames;
	uint16_t idx_frame;
	uint16_t cnt_sent;
	uint16_t idx_port;
	uint16_t lock_result;

	while (app_cfg.exit_now == 0) {
		for (idx_port = 0; idx_port < app_cfg.cnt_ports; idx_port++) {
			/* Check that port is active and unlocked */
			ptr_port = &app_cfg.ports[idx_port];
			lock_result = rte_spinlock_trylock(&ptr_port->lock);
			if (lock_result == 0)
				continue;
			if (ptr_port->port_active == 0) {
				rte_spinlock_unlock(&ptr_port->lock);
				continue;
			}
			txq = &ptr_port->txq;

			/* MAC address was updated */
			if (ptr_port->port_dirty == 1) {
				rte_eth_macaddr_get(ptr_port->idx_port,
					&ptr_port->mac_addr);
				ptr_port->port_dirty = 0;
			}

			/* Incoming frames */
			cnt_recv_frames = rte_eth_rx_burst(
				ptr_port->idx_port, 0,
				&txq->buf_frames[txq->cnt_unsent],
				RTE_DIM(txq->buf_frames) - txq->cnt_unsent
				);
			if (cnt_recv_frames > 0) {
				for (idx_frame = 0;
					idx_frame < cnt_recv_frames;
					idx_frame++) {
					ptr_frame = txq->buf_frames[
						idx_frame + txq->cnt_unsent];
					process_frame(ptr_port, ptr_frame);
				}
				txq->cnt_unsent += cnt_recv_frames;
			}

			/* Outgoing frames */
			if (txq->cnt_unsent > 0) {
				cnt_sent = rte_eth_tx_burst(
					ptr_port->idx_port, 0,
					txq->buf_frames,
					txq->cnt_unsent
					);
				/* Shuffle up unsent frame pointers */
				for (idx_frame = cnt_sent;
					idx_frame < txq->cnt_unsent;
					idx_frame++)
					txq->buf_frames[idx_frame - cnt_sent] =
						txq->buf_frames[idx_frame];
				txq->cnt_unsent -= cnt_sent;
			}
			rte_spinlock_unlock(&ptr_port->lock);
		} /* end for( idx_port ) */
	} /* end for(;;) */

	return 0;
}

int main(int argc, char **argv)
{
	int cnt_args_parsed;
	uint32_t id_core;
	uint32_t cnt_ports;

	/* Init runtime environment */
	cnt_args_parsed = rte_eal_init(argc, argv);
	if (cnt_args_parsed < 0)
		rte_exit(EXIT_FAILURE, "rte_eal_init(): Failed");

	cnt_ports = rte_eth_dev_count();
	printf("Number of NICs: %i\n", cnt_ports);
	if (cnt_ports == 0)
		rte_exit(EXIT_FAILURE, "No available NIC ports!\n");
	if (cnt_ports > MAX_PORTS) {
		printf("Info: Using only %i of %i ports\n",
			cnt_ports, MAX_PORTS
			);
		cnt_ports = MAX_PORTS;
	}

	setup_ports(&app_cfg, cnt_ports);

	app_cfg.exit_now = 0;
	app_cfg.cnt_ports = cnt_ports;

	if (rte_lcore_count() < 2)
		rte_exit(EXIT_FAILURE, "No available slave core!\n");
	/* Assume there is an available slave.. */
	id_core = rte_lcore_id();
	id_core = rte_get_next_lcore(id_core, 1, 1);
	rte_eal_remote_launch(slave_main, NULL, id_core);

	ethapp_main();

	app_cfg.exit_now = 1;
	RTE_LCORE_FOREACH_SLAVE(id_core) {
		if (rte_eal_wait_lcore(id_core) < 0)
			return -1;
	}

	return 0;
}