1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2014 Intel Corporation 3 */ 4 5 #include <stdint.h> 6 #include <memory.h> 7 8 #include <rte_log.h> 9 #include <rte_mbuf.h> 10 #include <rte_debug.h> 11 #include <rte_ethdev.h> 12 #include <rte_mempool.h> 13 #include <rte_sched.h> 14 #include <rte_cycles.h> 15 #include <rte_string_fns.h> 16 #include <rte_cfgfile.h> 17 18 #include "main.h" 19 #include "cfg_file.h" 20 21 uint32_t app_numa_mask = 0; 22 static uint32_t app_inited_port_mask = 0; 23 24 int app_pipe_to_profile[MAX_SCHED_SUBPORTS][MAX_SCHED_PIPES]; 25 26 #define MAX_NAME_LEN 32 27 28 struct ring_conf ring_conf = { 29 .rx_size = APP_RX_DESC_DEFAULT, 30 .ring_size = APP_RING_SIZE, 31 .tx_size = APP_TX_DESC_DEFAULT, 32 }; 33 34 struct burst_conf burst_conf = { 35 .rx_burst = MAX_PKT_RX_BURST, 36 .ring_burst = PKT_ENQUEUE, 37 .qos_dequeue = PKT_DEQUEUE, 38 .tx_burst = MAX_PKT_TX_BURST, 39 }; 40 41 struct ring_thresh rx_thresh = { 42 .pthresh = RX_PTHRESH, 43 .hthresh = RX_HTHRESH, 44 .wthresh = RX_WTHRESH, 45 }; 46 47 struct ring_thresh tx_thresh = { 48 .pthresh = TX_PTHRESH, 49 .hthresh = TX_HTHRESH, 50 .wthresh = TX_WTHRESH, 51 }; 52 53 uint32_t nb_pfc; 54 const char *cfg_profile = NULL; 55 int mp_size = NB_MBUF; 56 struct flow_conf qos_conf[MAX_DATA_STREAMS]; 57 58 static const struct rte_eth_conf port_conf = { 59 .rxmode = { 60 .max_rx_pkt_len = ETHER_MAX_LEN, 61 .split_hdr_size = 0, 62 .header_split = 0, /**< Header Split disabled */ 63 .hw_ip_checksum = 0, /**< IP checksum offload disabled */ 64 .hw_vlan_filter = 0, /**< VLAN filtering disabled */ 65 .jumbo_frame = 0, /**< Jumbo Frame Support disabled */ 66 .hw_strip_crc = 1, /**< CRC stripped by hardware */ 67 }, 68 .txmode = { 69 .mq_mode = ETH_DCB_NONE, 70 }, 71 }; 72 73 static int 74 app_init_port(uint16_t portid, struct rte_mempool *mp) 75 { 76 int ret; 77 struct rte_eth_link link; 78 struct rte_eth_rxconf rx_conf; 79 struct rte_eth_txconf tx_conf; 80 uint16_t rx_size; 81 uint16_t tx_size; 82 83 /* check if port already initialized (multistream configuration) */ 84 if (app_inited_port_mask & (1u << portid)) 85 return 0; 86 87 rx_conf.rx_thresh.pthresh = rx_thresh.pthresh; 88 rx_conf.rx_thresh.hthresh = rx_thresh.hthresh; 89 rx_conf.rx_thresh.wthresh = rx_thresh.wthresh; 90 rx_conf.rx_free_thresh = 32; 91 rx_conf.rx_drop_en = 0; 92 rx_conf.rx_deferred_start = 0; 93 94 tx_conf.tx_thresh.pthresh = tx_thresh.pthresh; 95 tx_conf.tx_thresh.hthresh = tx_thresh.hthresh; 96 tx_conf.tx_thresh.wthresh = tx_thresh.wthresh; 97 tx_conf.tx_free_thresh = 0; 98 tx_conf.tx_rs_thresh = 0; 99 tx_conf.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS; 100 tx_conf.tx_deferred_start = 0; 101 102 /* init port */ 103 RTE_LOG(INFO, APP, "Initializing port %"PRIu16"... ", portid); 104 fflush(stdout); 105 ret = rte_eth_dev_configure(portid, 1, 1, &port_conf); 106 if (ret < 0) 107 rte_exit(EXIT_FAILURE, 108 "Cannot configure device: err=%d, port=%u\n", 109 ret, portid); 110 111 rx_size = ring_conf.rx_size; 112 tx_size = ring_conf.tx_size; 113 ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &rx_size, &tx_size); 114 if (ret < 0) 115 rte_exit(EXIT_FAILURE, 116 "rte_eth_dev_adjust_nb_rx_tx_desc: err=%d,port=%u\n", 117 ret, portid); 118 ring_conf.rx_size = rx_size; 119 ring_conf.tx_size = tx_size; 120 121 /* init one RX queue */ 122 fflush(stdout); 123 ret = rte_eth_rx_queue_setup(portid, 0, (uint16_t)ring_conf.rx_size, 124 rte_eth_dev_socket_id(portid), &rx_conf, mp); 125 if (ret < 0) 126 rte_exit(EXIT_FAILURE, 127 "rte_eth_tx_queue_setup: err=%d, port=%u\n", 128 ret, portid); 129 130 /* init one TX queue */ 131 fflush(stdout); 132 ret = rte_eth_tx_queue_setup(portid, 0, 133 (uint16_t)ring_conf.tx_size, rte_eth_dev_socket_id(portid), &tx_conf); 134 if (ret < 0) 135 rte_exit(EXIT_FAILURE, 136 "rte_eth_tx_queue_setup: err=%d, port=%u queue=%d\n", 137 ret, portid, 0); 138 139 /* Start device */ 140 ret = rte_eth_dev_start(portid); 141 if (ret < 0) 142 rte_exit(EXIT_FAILURE, 143 "rte_pmd_port_start: err=%d, port=%u\n", 144 ret, portid); 145 146 printf("done: "); 147 148 /* get link status */ 149 rte_eth_link_get(portid, &link); 150 if (link.link_status) { 151 printf(" Link Up - speed %u Mbps - %s\n", 152 (uint32_t) link.link_speed, 153 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 154 ("full-duplex") : ("half-duplex\n")); 155 } else { 156 printf(" Link Down\n"); 157 } 158 rte_eth_promiscuous_enable(portid); 159 160 /* mark port as initialized */ 161 app_inited_port_mask |= 1u << portid; 162 163 return 0; 164 } 165 166 static struct rte_sched_subport_params subport_params[MAX_SCHED_SUBPORTS] = { 167 { 168 .tb_rate = 1250000000, 169 .tb_size = 1000000, 170 171 .tc_rate = {1250000000, 1250000000, 1250000000, 1250000000}, 172 .tc_period = 10, 173 }, 174 }; 175 176 static struct rte_sched_pipe_params pipe_profiles[RTE_SCHED_PIPE_PROFILES_PER_PORT] = { 177 { /* Profile #0 */ 178 .tb_rate = 305175, 179 .tb_size = 1000000, 180 181 .tc_rate = {305175, 305175, 305175, 305175}, 182 .tc_period = 40, 183 #ifdef RTE_SCHED_SUBPORT_TC_OV 184 .tc_ov_weight = 1, 185 #endif 186 187 .wrr_weights = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 188 }, 189 }; 190 191 struct rte_sched_port_params port_params = { 192 .name = "port_scheduler_0", 193 .socket = 0, /* computed */ 194 .rate = 0, /* computed */ 195 .mtu = 6 + 6 + 4 + 4 + 2 + 1500, 196 .frame_overhead = RTE_SCHED_FRAME_OVERHEAD_DEFAULT, 197 .n_subports_per_port = 1, 198 .n_pipes_per_subport = 4096, 199 .qsize = {64, 64, 64, 64}, 200 .pipe_profiles = pipe_profiles, 201 .n_pipe_profiles = sizeof(pipe_profiles) / sizeof(struct rte_sched_pipe_params), 202 203 #ifdef RTE_SCHED_RED 204 .red_params = { 205 /* Traffic Class 0 Colors Green / Yellow / Red */ 206 [0][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, 207 [0][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, 208 [0][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, 209 210 /* Traffic Class 1 - Colors Green / Yellow / Red */ 211 [1][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, 212 [1][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, 213 [1][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, 214 215 /* Traffic Class 2 - Colors Green / Yellow / Red */ 216 [2][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, 217 [2][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, 218 [2][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, 219 220 /* Traffic Class 3 - Colors Green / Yellow / Red */ 221 [3][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, 222 [3][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}, 223 [3][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9} 224 } 225 #endif /* RTE_SCHED_RED */ 226 }; 227 228 static struct rte_sched_port * 229 app_init_sched_port(uint32_t portid, uint32_t socketid) 230 { 231 static char port_name[32]; /* static as referenced from global port_params*/ 232 struct rte_eth_link link; 233 struct rte_sched_port *port = NULL; 234 uint32_t pipe, subport; 235 int err; 236 237 rte_eth_link_get(portid, &link); 238 239 port_params.socket = socketid; 240 port_params.rate = (uint64_t) link.link_speed * 1000 * 1000 / 8; 241 snprintf(port_name, sizeof(port_name), "port_%d", portid); 242 port_params.name = port_name; 243 244 port = rte_sched_port_config(&port_params); 245 if (port == NULL){ 246 rte_exit(EXIT_FAILURE, "Unable to config sched port\n"); 247 } 248 249 for (subport = 0; subport < port_params.n_subports_per_port; subport ++) { 250 err = rte_sched_subport_config(port, subport, &subport_params[subport]); 251 if (err) { 252 rte_exit(EXIT_FAILURE, "Unable to config sched subport %u, err=%d\n", 253 subport, err); 254 } 255 256 for (pipe = 0; pipe < port_params.n_pipes_per_subport; pipe ++) { 257 if (app_pipe_to_profile[subport][pipe] != -1) { 258 err = rte_sched_pipe_config(port, subport, pipe, 259 app_pipe_to_profile[subport][pipe]); 260 if (err) { 261 rte_exit(EXIT_FAILURE, "Unable to config sched pipe %u " 262 "for profile %d, err=%d\n", pipe, 263 app_pipe_to_profile[subport][pipe], err); 264 } 265 } 266 } 267 } 268 269 return port; 270 } 271 272 static int 273 app_load_cfg_profile(const char *profile) 274 { 275 if (profile == NULL) 276 return 0; 277 struct rte_cfgfile *file = rte_cfgfile_load(profile, 0); 278 if (file == NULL) 279 rte_exit(EXIT_FAILURE, "Cannot load configuration profile %s\n", profile); 280 281 cfg_load_port(file, &port_params); 282 cfg_load_subport(file, subport_params); 283 cfg_load_pipe(file, pipe_profiles); 284 285 rte_cfgfile_close(file); 286 287 return 0; 288 } 289 290 int app_init(void) 291 { 292 uint32_t i; 293 char ring_name[MAX_NAME_LEN]; 294 char pool_name[MAX_NAME_LEN]; 295 296 if (rte_eth_dev_count() == 0) 297 rte_exit(EXIT_FAILURE, "No Ethernet port - bye\n"); 298 299 /* load configuration profile */ 300 if (app_load_cfg_profile(cfg_profile) != 0) 301 rte_exit(EXIT_FAILURE, "Invalid configuration profile\n"); 302 303 /* Initialize each active flow */ 304 for(i = 0; i < nb_pfc; i++) { 305 uint32_t socket = rte_lcore_to_socket_id(qos_conf[i].rx_core); 306 struct rte_ring *ring; 307 308 snprintf(ring_name, MAX_NAME_LEN, "ring-%u-%u", i, qos_conf[i].rx_core); 309 ring = rte_ring_lookup(ring_name); 310 if (ring == NULL) 311 qos_conf[i].rx_ring = rte_ring_create(ring_name, ring_conf.ring_size, 312 socket, RING_F_SP_ENQ | RING_F_SC_DEQ); 313 else 314 qos_conf[i].rx_ring = ring; 315 316 snprintf(ring_name, MAX_NAME_LEN, "ring-%u-%u", i, qos_conf[i].tx_core); 317 ring = rte_ring_lookup(ring_name); 318 if (ring == NULL) 319 qos_conf[i].tx_ring = rte_ring_create(ring_name, ring_conf.ring_size, 320 socket, RING_F_SP_ENQ | RING_F_SC_DEQ); 321 else 322 qos_conf[i].tx_ring = ring; 323 324 325 /* create the mbuf pools for each RX Port */ 326 snprintf(pool_name, MAX_NAME_LEN, "mbuf_pool%u", i); 327 qos_conf[i].mbuf_pool = rte_pktmbuf_pool_create(pool_name, 328 mp_size, burst_conf.rx_burst * 4, 0, 329 RTE_MBUF_DEFAULT_BUF_SIZE, 330 rte_eth_dev_socket_id(qos_conf[i].rx_port)); 331 if (qos_conf[i].mbuf_pool == NULL) 332 rte_exit(EXIT_FAILURE, "Cannot init mbuf pool for socket %u\n", i); 333 334 app_init_port(qos_conf[i].rx_port, qos_conf[i].mbuf_pool); 335 app_init_port(qos_conf[i].tx_port, qos_conf[i].mbuf_pool); 336 337 qos_conf[i].sched_port = app_init_sched_port(qos_conf[i].tx_port, socket); 338 } 339 340 RTE_LOG(INFO, APP, "time stamp clock running at %" PRIu64 " Hz\n", 341 rte_get_timer_hz()); 342 343 RTE_LOG(INFO, APP, "Ring sizes: NIC RX = %u, Mempool = %d SW queue = %u," 344 "NIC TX = %u\n", ring_conf.rx_size, mp_size, ring_conf.ring_size, 345 ring_conf.tx_size); 346 347 RTE_LOG(INFO, APP, "Burst sizes: RX read = %hu, RX write = %hu,\n" 348 " Worker read/QoS enqueue = %hu,\n" 349 " QoS dequeue = %hu, Worker write = %hu\n", 350 burst_conf.rx_burst, burst_conf.ring_burst, burst_conf.ring_burst, 351 burst_conf.qos_dequeue, burst_conf.tx_burst); 352 353 RTE_LOG(INFO, APP, "NIC thresholds RX (p = %hhu, h = %hhu, w = %hhu)," 354 "TX (p = %hhu, h = %hhu, w = %hhu)\n", 355 rx_thresh.pthresh, rx_thresh.hthresh, rx_thresh.wthresh, 356 tx_thresh.pthresh, tx_thresh.hthresh, tx_thresh.wthresh); 357 358 return 0; 359 } 360