1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2014 Intel Corporation 3 */ 4 5 #include <stdint.h> 6 #include <stdlib.h> 7 #include <memory.h> 8 9 #include <rte_log.h> 10 #include <rte_mbuf.h> 11 #include <rte_debug.h> 12 #include <rte_ethdev.h> 13 #include <rte_mempool.h> 14 #include <rte_sched.h> 15 #include <rte_cycles.h> 16 #include <rte_string_fns.h> 17 #include <rte_cfgfile.h> 18 19 #include "main.h" 20 #include "cfg_file.h" 21 22 uint32_t app_numa_mask = 0; 23 static uint32_t app_inited_port_mask = 0; 24 25 int app_pipe_to_profile[MAX_SCHED_SUBPORTS][MAX_SCHED_PIPES]; 26 27 #define MAX_NAME_LEN 32 28 29 struct ring_conf ring_conf = { 30 .rx_size = APP_RX_DESC_DEFAULT, 31 .ring_size = APP_RING_SIZE, 32 .tx_size = APP_TX_DESC_DEFAULT, 33 }; 34 35 struct burst_conf burst_conf = { 36 .rx_burst = MAX_PKT_RX_BURST, 37 .ring_burst = PKT_ENQUEUE, 38 .qos_dequeue = PKT_DEQUEUE, 39 .tx_burst = MAX_PKT_TX_BURST, 40 }; 41 42 struct ring_thresh rx_thresh = { 43 .pthresh = RX_PTHRESH, 44 .hthresh = RX_HTHRESH, 45 .wthresh = RX_WTHRESH, 46 }; 47 48 struct ring_thresh tx_thresh = { 49 .pthresh = TX_PTHRESH, 50 .hthresh = TX_HTHRESH, 51 .wthresh = TX_WTHRESH, 52 }; 53 54 uint32_t nb_pfc; 55 const char *cfg_profile = NULL; 56 int mp_size = NB_MBUF; 57 struct flow_conf qos_conf[MAX_DATA_STREAMS]; 58 59 static struct rte_eth_conf port_conf = { 60 .txmode = { 61 .mq_mode = RTE_ETH_MQ_TX_NONE, 62 }, 63 }; 64 65 static int 66 app_init_port(uint16_t portid, struct rte_mempool *mp) 67 { 68 int ret; 69 struct rte_eth_link link; 70 struct rte_eth_dev_info dev_info; 71 struct rte_eth_rxconf rx_conf; 72 struct rte_eth_txconf tx_conf; 73 uint16_t rx_size; 74 uint16_t tx_size; 75 struct rte_eth_conf local_port_conf = port_conf; 76 char link_status_text[RTE_ETH_LINK_MAX_STR_LEN]; 77 78 /* check if port already initialized (multistream configuration) */ 79 if (app_inited_port_mask & (1u << portid)) 80 return 0; 81 82 memset(&rx_conf, 0, sizeof(struct rte_eth_rxconf)); 83 rx_conf.rx_thresh.pthresh = rx_thresh.pthresh; 84 rx_conf.rx_thresh.hthresh = rx_thresh.hthresh; 85 rx_conf.rx_thresh.wthresh = rx_thresh.wthresh; 86 rx_conf.rx_free_thresh = 32; 87 rx_conf.rx_drop_en = 0; 88 rx_conf.rx_deferred_start = 0; 89 90 memset(&tx_conf, 0, sizeof(struct rte_eth_txconf)); 91 tx_conf.tx_thresh.pthresh = tx_thresh.pthresh; 92 tx_conf.tx_thresh.hthresh = tx_thresh.hthresh; 93 tx_conf.tx_thresh.wthresh = tx_thresh.wthresh; 94 tx_conf.tx_free_thresh = 0; 95 tx_conf.tx_rs_thresh = 0; 96 tx_conf.tx_deferred_start = 0; 97 98 /* init port */ 99 RTE_LOG(INFO, APP, "Initializing port %"PRIu16"... ", portid); 100 fflush(stdout); 101 102 ret = rte_eth_dev_info_get(portid, &dev_info); 103 if (ret != 0) 104 rte_exit(EXIT_FAILURE, 105 "Error during getting device (port %u) info: %s\n", 106 portid, strerror(-ret)); 107 108 if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) 109 local_port_conf.txmode.offloads |= 110 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; 111 ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf); 112 if (ret < 0) 113 rte_exit(EXIT_FAILURE, 114 "Cannot configure device: err=%d, port=%u\n", 115 ret, portid); 116 117 rx_size = ring_conf.rx_size; 118 tx_size = ring_conf.tx_size; 119 ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &rx_size, &tx_size); 120 if (ret < 0) 121 rte_exit(EXIT_FAILURE, 122 "rte_eth_dev_adjust_nb_rx_tx_desc: err=%d,port=%u\n", 123 ret, portid); 124 ring_conf.rx_size = rx_size; 125 ring_conf.tx_size = tx_size; 126 127 /* init one RX queue */ 128 fflush(stdout); 129 rx_conf.offloads = local_port_conf.rxmode.offloads; 130 ret = rte_eth_rx_queue_setup(portid, 0, (uint16_t)ring_conf.rx_size, 131 rte_eth_dev_socket_id(portid), &rx_conf, mp); 132 if (ret < 0) 133 rte_exit(EXIT_FAILURE, 134 "rte_eth_tx_queue_setup: err=%d, port=%u\n", 135 ret, portid); 136 137 /* init one TX queue */ 138 fflush(stdout); 139 tx_conf.offloads = local_port_conf.txmode.offloads; 140 ret = rte_eth_tx_queue_setup(portid, 0, 141 (uint16_t)ring_conf.tx_size, rte_eth_dev_socket_id(portid), &tx_conf); 142 if (ret < 0) 143 rte_exit(EXIT_FAILURE, 144 "rte_eth_tx_queue_setup: err=%d, port=%u queue=%d\n", 145 ret, portid, 0); 146 147 /* Start device */ 148 ret = rte_eth_dev_start(portid); 149 if (ret < 0) 150 rte_exit(EXIT_FAILURE, 151 "rte_pmd_port_start: err=%d, port=%u\n", 152 ret, portid); 153 154 printf("done: "); 155 156 /* get link status */ 157 ret = rte_eth_link_get(portid, &link); 158 if (ret < 0) 159 rte_exit(EXIT_FAILURE, 160 "rte_eth_link_get: err=%d, port=%u: %s\n", 161 ret, portid, rte_strerror(-ret)); 162 163 rte_eth_link_to_str(link_status_text, sizeof(link_status_text), &link); 164 printf("%s\n", link_status_text); 165 166 ret = rte_eth_promiscuous_enable(portid); 167 if (ret != 0) 168 rte_exit(EXIT_FAILURE, 169 "rte_eth_promiscuous_enable: err=%s, port=%u\n", 170 rte_strerror(-ret), portid); 171 172 /* mark port as initialized */ 173 app_inited_port_mask |= 1u << portid; 174 175 return 0; 176 } 177 178 static struct rte_sched_pipe_params pipe_profiles[MAX_SCHED_PIPE_PROFILES] = { 179 { /* Profile #0 */ 180 .tb_rate = 305175, 181 .tb_size = 1000000, 182 183 .tc_rate = {305175, 305175, 305175, 305175, 305175, 305175, 184 305175, 305175, 305175, 305175, 305175, 305175, 305175}, 185 .tc_period = 40, 186 .tc_ov_weight = 1, 187 188 .wrr_weights = {1, 1, 1, 1}, 189 }, 190 }; 191 192 static struct rte_sched_subport_profile_params 193 subport_profile[MAX_SCHED_SUBPORT_PROFILES] = { 194 { 195 .tb_rate = 1250000000, 196 .tb_size = 1000000, 197 .tc_rate = {1250000000, 1250000000, 1250000000, 1250000000, 198 1250000000, 1250000000, 1250000000, 1250000000, 1250000000, 199 1250000000, 1250000000, 1250000000, 1250000000}, 200 .tc_period = 10, 201 }, 202 }; 203 204 struct rte_sched_subport_params subport_params[MAX_SCHED_SUBPORTS] = { 205 { 206 .n_pipes_per_subport_enabled = 4096, 207 .qsize = {64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64}, 208 .pipe_profiles = pipe_profiles, 209 .n_pipe_profiles = sizeof(pipe_profiles) / 210 sizeof(struct rte_sched_pipe_params), 211 .n_max_pipe_profiles = MAX_SCHED_PIPE_PROFILES, 212 .cman_params = NULL, 213 }, 214 }; 215 216 struct rte_sched_port_params port_params = { 217 .name = "port_scheduler_0", 218 .socket = 0, /* computed */ 219 .rate = 0, /* computed */ 220 .mtu = 6 + 6 + 4 + 4 + 2 + 1500, 221 .frame_overhead = RTE_SCHED_FRAME_OVERHEAD_DEFAULT, 222 .n_subports_per_port = 1, 223 .n_subport_profiles = 1, 224 .subport_profiles = subport_profile, 225 .n_max_subport_profiles = MAX_SCHED_SUBPORT_PROFILES, 226 .n_pipes_per_subport = MAX_SCHED_PIPES, 227 }; 228 229 static struct rte_sched_port * 230 app_init_sched_port(uint32_t portid, uint32_t socketid) 231 { 232 static char port_name[32]; /* static as referenced from global port_params*/ 233 struct rte_eth_link link; 234 struct rte_sched_port *port = NULL; 235 uint32_t pipe, subport; 236 uint32_t pipe_count; 237 int err; 238 239 err = rte_eth_link_get(portid, &link); 240 if (err < 0) 241 rte_exit(EXIT_FAILURE, 242 "rte_eth_link_get: err=%d, port=%u: %s\n", 243 err, portid, rte_strerror(-err)); 244 245 port_params.socket = socketid; 246 port_params.rate = (uint64_t) link.link_speed * 1000 * 1000 / 8; 247 snprintf(port_name, sizeof(port_name), "port_%d", portid); 248 port_params.name = port_name; 249 250 port = rte_sched_port_config(&port_params); 251 if (port == NULL){ 252 rte_exit(EXIT_FAILURE, "Unable to config sched port\n"); 253 } 254 255 for (subport = 0; subport < port_params.n_subports_per_port; subport ++) { 256 err = rte_sched_subport_config(port, subport, 257 &subport_params[subport], 258 0); 259 if (err) { 260 rte_exit(EXIT_FAILURE, "Unable to config sched " 261 "subport %u, err=%d\n", subport, err); 262 } 263 264 uint32_t n_pipes_per_subport = 265 subport_params[subport].n_pipes_per_subport_enabled; 266 267 pipe_count = 0; 268 for (pipe = 0; pipe < n_pipes_per_subport; pipe++) { 269 if (app_pipe_to_profile[subport][pipe] != -1) { 270 err = rte_sched_pipe_config(port, subport, pipe, 271 app_pipe_to_profile[subport][pipe]); 272 if (err) { 273 rte_exit(EXIT_FAILURE, "Unable to config sched pipe %u " 274 "for profile %d, err=%d\n", pipe, 275 app_pipe_to_profile[subport][pipe], err); 276 } 277 pipe_count++; 278 } 279 } 280 281 if (pipe_count == 0) 282 rte_exit(EXIT_FAILURE, "Error: invalid config, no pipes enabled for sched subport %u\n", 283 subport); 284 } 285 286 return port; 287 } 288 289 static int 290 app_load_cfg_profile(const char *profile) 291 { 292 int ret = 0; 293 if (profile == NULL) 294 return 0; 295 struct rte_cfgfile *file = rte_cfgfile_load(profile, 0); 296 if (file == NULL) 297 rte_exit(EXIT_FAILURE, "Cannot load configuration profile %s\n", profile); 298 299 ret = cfg_load_port(file, &port_params); 300 if (ret) 301 goto _app_load_cfg_profile_error_return; 302 303 ret = cfg_load_subport(file, subport_params); 304 if (ret) 305 goto _app_load_cfg_profile_error_return; 306 307 ret = cfg_load_subport_profile(file, subport_profile); 308 if (ret) 309 goto _app_load_cfg_profile_error_return; 310 311 ret = cfg_load_pipe(file, pipe_profiles); 312 if (ret) 313 goto _app_load_cfg_profile_error_return; 314 315 _app_load_cfg_profile_error_return: 316 rte_cfgfile_close(file); 317 318 return ret; 319 } 320 321 int app_init(void) 322 { 323 uint32_t i; 324 char ring_name[MAX_NAME_LEN]; 325 char pool_name[MAX_NAME_LEN]; 326 int ret; 327 328 if (rte_eth_dev_count_avail() == 0) 329 rte_exit(EXIT_FAILURE, "No Ethernet port - bye\n"); 330 331 /* load configuration profile */ 332 if (app_load_cfg_profile(cfg_profile) != 0) 333 rte_exit(EXIT_FAILURE, "Invalid configuration profile\n"); 334 335 /* Initialize each active flow */ 336 for(i = 0; i < nb_pfc; i++) { 337 uint32_t socket = rte_lcore_to_socket_id(qos_conf[i].rx_core); 338 struct rte_ring *ring; 339 struct rte_eth_link link; 340 int retry_count = 100, retry_delay = 100; /* try every 100ms for 10 sec */ 341 342 snprintf(ring_name, MAX_NAME_LEN, "ring-%u-%u", i, qos_conf[i].rx_core); 343 ring = rte_ring_lookup(ring_name); 344 if (ring == NULL) 345 qos_conf[i].rx_ring = rte_ring_create(ring_name, ring_conf.ring_size, 346 socket, RING_F_SP_ENQ | RING_F_SC_DEQ); 347 else 348 qos_conf[i].rx_ring = ring; 349 350 snprintf(ring_name, MAX_NAME_LEN, "ring-%u-%u", i, qos_conf[i].tx_core); 351 ring = rte_ring_lookup(ring_name); 352 if (ring == NULL) 353 qos_conf[i].tx_ring = rte_ring_create(ring_name, ring_conf.ring_size, 354 socket, RING_F_SP_ENQ | RING_F_SC_DEQ); 355 else 356 qos_conf[i].tx_ring = ring; 357 358 359 /* create the mbuf pools for each RX Port */ 360 snprintf(pool_name, MAX_NAME_LEN, "mbuf_pool%u", i); 361 qos_conf[i].mbuf_pool = rte_pktmbuf_pool_create(pool_name, 362 mp_size, burst_conf.rx_burst * 4, 0, 363 RTE_MBUF_DEFAULT_BUF_SIZE, 364 rte_eth_dev_socket_id(qos_conf[i].rx_port)); 365 if (qos_conf[i].mbuf_pool == NULL) 366 rte_exit(EXIT_FAILURE, "Cannot init mbuf pool for socket %u\n", i); 367 368 app_init_port(qos_conf[i].rx_port, qos_conf[i].mbuf_pool); 369 app_init_port(qos_conf[i].tx_port, qos_conf[i].mbuf_pool); 370 371 memset(&link, 0, sizeof(link)); 372 ret = rte_eth_link_get(qos_conf[i].tx_port, &link); 373 if (ret < 0) 374 rte_exit(EXIT_FAILURE, 375 "rte_eth_link_get: err=%d, port=%u: %s\n", 376 ret, qos_conf[i].tx_port, rte_strerror(-ret)); 377 if (link.link_status == 0) 378 printf("Waiting for link on port %u\n", qos_conf[i].tx_port); 379 380 while (link.link_status == 0 && retry_count--) { 381 rte_delay_ms(retry_delay); 382 ret = rte_eth_link_get(qos_conf[i].tx_port, &link); 383 rte_exit(EXIT_FAILURE, 384 "rte_eth_link_get: err=%d, port=%u: %s\n", 385 ret, qos_conf[i].tx_port, rte_strerror(-ret)); 386 } 387 388 qos_conf[i].sched_port = app_init_sched_port(qos_conf[i].tx_port, socket); 389 } 390 391 RTE_LOG(INFO, APP, "time stamp clock running at %" PRIu64 " Hz\n", 392 rte_get_timer_hz()); 393 394 RTE_LOG(INFO, APP, "Ring sizes: NIC RX = %u, Mempool = %d SW queue = %u," 395 "NIC TX = %u\n", ring_conf.rx_size, mp_size, ring_conf.ring_size, 396 ring_conf.tx_size); 397 398 RTE_LOG(INFO, APP, "Burst sizes: RX read = %hu, RX write = %hu,\n" 399 " Worker read/QoS enqueue = %hu,\n" 400 " QoS dequeue = %hu, Worker write = %hu\n", 401 burst_conf.rx_burst, burst_conf.ring_burst, burst_conf.ring_burst, 402 burst_conf.qos_dequeue, burst_conf.tx_burst); 403 404 RTE_LOG(INFO, APP, "NIC thresholds RX (p = %hhu, h = %hhu, w = %hhu)," 405 "TX (p = %hhu, h = %hhu, w = %hhu)\n", 406 rx_thresh.pthresh, rx_thresh.hthresh, rx_thresh.wthresh, 407 tx_thresh.pthresh, tx_thresh.hthresh, tx_thresh.wthresh); 408 409 return 0; 410 } 411